diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 00000000..3f2baa11 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,3 @@ +[alias] +docs-notarization = "doc -p notarization" +docs-audit-trail = "doc -p audit_trail" diff --git a/.github/actions/publish/wasm/action.yml b/.github/actions/publish/wasm/action.yml index 4fc39627..ba44b905 100644 --- a/.github/actions/publish/wasm/action.yml +++ b/.github/actions/publish/wasm/action.yml @@ -10,6 +10,9 @@ inputs: working-directory: description: "Directory to publish from" required: true + artifact-download-path: + description: "Directory to download artifacts to (defaults to working-directory)" + required: false dry-run: description: "'true' = only log potential result; 'false' = publish'" required: true @@ -27,7 +30,7 @@ runs: uses: actions/download-artifact@v4 with: name: ${{ inputs.input-artifact-name }} - path: bindings/wasm/notarization_wasm + path: ${{ inputs.artifact-download-path || inputs.working-directory }} - name: Publish WASM bindings to NPM shell: sh diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 830ff1b5..657bdcdf 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -17,11 +17,13 @@ on: - ".github/workflows/shared-build-wasm.yml" - ".github/actions/**" - "**.rs" + - "**.move" - "**.toml" - "**.lock" - "bindings/**" - "!bindings/**.md" - "bindings/wasm/notarization_wasm/README.md" # the Readme contain txm tests + - "bindings/wasm/audit_trail_wasm/README.md" schedule: # * is a special character in YAML so you have to quote this string @@ -165,17 +167,27 @@ jobs: - name: test Notarization Move package if: matrix.os != 'windows-latest' - # publish the package and set the IOTA_NOTARIZATION_PKG_ID env variable - run: | - iota move test + run: iota move test working-directory: notarization-move + - name: test Audit Trail Move package + if: matrix.os != 'windows-latest' + run: iota move test + 
working-directory: audit-trail-move + - name: publish Notarization Move package if: matrix.os != 'windows-latest' - # publish the package and set the IOTA_NOTARIZATION_PKG_ID env variable run: echo "IOTA_NOTARIZATION_PKG_ID=$(./publish_package.sh)" >> "$GITHUB_ENV" working-directory: notarization-move/scripts/ + - name: publish Audit Trail Move package + if: matrix.os != 'windows-latest' + run: | + eval "$(./publish_package.sh)" + echo "IOTA_AUDIT_TRAIL_PKG_ID=$IOTA_AUDIT_TRAIL_PKG_ID" >> "$GITHUB_ENV" + echo "IOTA_TF_COMPONENTS_PKG_ID=$IOTA_TF_COMPONENTS_PKG_ID" >> "$GITHUB_ENV" + working-directory: audit-trail-move/scripts/ + - name: Run tests if: matrix.os != 'windows-latest' run: cargo test --workspace --release -- --test-threads=1 @@ -210,7 +222,7 @@ jobs: uses: "./.github/actions/rust/sccache/stop" with: os: ${{matrix.os}} - build-wasm: + build-wasm-notarization: needs: check-for-run-condition if: ${{ needs.check-for-run-condition.outputs.should-run == 'true' }} uses: "./.github/workflows/shared-build-wasm.yml" @@ -218,8 +230,18 @@ jobs: run-unit-tests: false output-artifact-name: notarization-wasm-bindings-build - test-wasm: - needs: build-wasm + build-wasm-audit-trail: + needs: check-for-run-condition + if: ${{ needs.check-for-run-condition.outputs.should-run == 'true' }} + uses: "./.github/workflows/shared-build-wasm.yml" + with: + run-unit-tests: false + output-artifact-name: audit-trail-wasm-bindings-build + wasm-package-dir: bindings/wasm/audit_trail_wasm + wasm-crate-name: audit_trail_wasm + + test-wasm-notarization: + needs: [build-wasm-notarization, check-for-run-condition] if: ${{ needs.check-for-run-condition.outputs.should-run == 'true' }} runs-on: ubuntu-24.04 strategy: @@ -253,21 +275,53 @@ jobs: iota-version: ${{ env.IOTA_VERSION }} - name: publish Notarization Move package - if: matrix.os != 'windows-latest' - # publish the package and set the IOTA_NOTARIZATION_PKG_ID env variable run: echo "IOTA_NOTARIZATION_PKG_ID=$(./publish_package.sh)" >> 
"$GITHUB_ENV" working-directory: notarization-move/scripts/ + - name: Run Wasm examples + run: npm run test:node + working-directory: bindings/wasm/notarization_wasm + + test-wasm-audit-trail: + needs: [build-wasm-audit-trail, check-for-run-condition] + if: ${{ needs.check-for-run-condition.outputs.should-run == 'true' }} + runs-on: ubuntu-24.04 + + steps: + - uses: actions/checkout@v3 + + - name: Set up Node.js + uses: actions/setup-node@v1 + with: + node-version: 20.x + - name: Install JS dependencies run: npm ci - working-directory: bindings/wasm/notarization_wasm + working-directory: bindings/wasm/audit_trail_wasm + + - name: Download bindings/wasm/audit_trail_wasm artifacts + uses: actions/download-artifact@v4 + with: + name: audit-trail-wasm-bindings-build + path: bindings/wasm/audit_trail_wasm + + - name: Start iota sandbox + uses: "./.github/actions/iota/setup" + with: + iota-version: ${{ env.IOTA_VERSION }} + + - name: publish Audit Trail Move package + run: | + eval "$(./publish_package.sh)" + echo "IOTA_AUDIT_TRAIL_PKG_ID=$IOTA_AUDIT_TRAIL_PKG_ID" >> "$GITHUB_ENV" + echo "IOTA_TF_COMPONENTS_PKG_ID=$IOTA_TF_COMPONENTS_PKG_ID" >> "$GITHUB_ENV" + working-directory: audit-trail-move/scripts/ - name: Run Wasm examples - #run: npm run test:readme && npm run test:node run: npm run test:node - working-directory: bindings/wasm/notarization_wasm - test-wasm-browser: - needs: build-wasm + working-directory: bindings/wasm/audit_trail_wasm + test-wasm-browser-notarization: + needs: [build-wasm-notarization, check-for-run-condition] if: ${{ needs.check-for-run-condition.outputs.should-run == 'true' }} runs-on: ubuntu-24.04 strategy: @@ -316,3 +370,57 @@ jobs: - name: Run cypress run: docker run --network host cypress-test test:browser:${{ matrix.browser }} + + test-wasm-browser-audit-trail: + needs: [build-wasm-audit-trail, check-for-run-condition] + if: ${{ needs.check-for-run-condition.outputs.should-run == 'true' }} + runs-on: ubuntu-24.04 + strategy: + fail-fast: 
false + matrix: + browser: [chrome, firefox] + + steps: + - uses: actions/checkout@v3 + + - name: Set up Node.js + uses: actions/setup-node@v1 + with: + node-version: 20.x + + - name: Install JS dependencies + run: npm ci + working-directory: bindings/wasm/audit_trail_wasm + + - name: Download bindings/wasm/audit_trail_wasm artifacts + uses: actions/download-artifact@v4 + with: + name: audit-trail-wasm-bindings-build + path: bindings/wasm/audit_trail_wasm + + - name: Start iota sandbox + uses: "./.github/actions/iota/setup" + with: + iota-version: ${{ env.IOTA_VERSION }} + + - name: publish Audit Trail Move package + run: | + eval "$(./publish_package.sh)" + echo "IOTA_AUDIT_TRAIL_PKG_ID=$IOTA_AUDIT_TRAIL_PKG_ID" >> "$GITHUB_ENV" + echo "IOTA_TF_COMPONENTS_PKG_ID=$IOTA_TF_COMPONENTS_PKG_ID" >> "$GITHUB_ENV" + working-directory: audit-trail-move/scripts/ + + - name: Build Docker image + uses: docker/build-push-action@v6.2.0 + with: + context: bindings/wasm/ + file: bindings/wasm/audit_trail_wasm/cypress/Dockerfile + push: false + tags: cypress-audit-trail:latest + load: true + build-args: | + IOTA_AUDIT_TRAIL_PKG_ID=${{ env.IOTA_AUDIT_TRAIL_PKG_ID }} + IOTA_TF_COMPONENTS_PKG_ID=${{ env.IOTA_TF_COMPONENTS_PKG_ID }} + + - name: Run cypress + run: docker run --network host cypress-audit-trail test:browser:${{ matrix.browser }} diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml index b542cd19..16239453 100644 --- a/.github/workflows/clippy.yml +++ b/.github/workflows/clippy.yml @@ -49,3 +49,9 @@ jobs: if: ${{ false }} with: args: --manifest-path ./bindings/wasm/notarization_wasm/Cargo.toml --target wasm32-unknown-unknown --all-targets --all-features -- -D warnings + + - name: Wasm clippy check audit_trail_wasm + uses: actions-rs-plus/clippy-check@b09a9c37c9df7db8b1a5d52e8fe8e0b6e3d574c4 + if: ${{ false }} + with: + args: --manifest-path ./bindings/wasm/audit_trail_wasm/Cargo.toml --target wasm32-unknown-unknown --all-targets --all-features -- -D 
warnings diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index eeb124f3..13e1a5c9 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -49,6 +49,9 @@ jobs: - name: wasm fmt check notarization_wasm run: cargo +nightly fmt --manifest-path ./bindings/wasm/notarization_wasm/Cargo.toml --all -- --check + - name: wasm fmt check audit_trail_wasm + run: cargo +nightly fmt --manifest-path ./bindings/wasm/audit_trail_wasm/Cargo.toml --all -- --check + - name: fmt check with dprint run: dprint check @@ -61,6 +64,10 @@ jobs: - name: Install prettier-plugin-move run: npm i @mysten/prettier-plugin-move - - name: prettier-move check + - name: prettier-move check notarization-move working-directory: notarization-move run: npx prettier-move -c **/*.move + + - name: prettier-move check audit-trail-move + working-directory: audit-trail-move + run: npx prettier-move -c **/*.move diff --git a/.github/workflows/shared-build-wasm.yml b/.github/workflows/shared-build-wasm.yml index 9e4a432a..4f8c954b 100644 --- a/.github/workflows/shared-build-wasm.yml +++ b/.github/workflows/shared-build-wasm.yml @@ -19,6 +19,16 @@ on: description: "Name used for the output build artifact" required: true type: string + wasm-package-dir: + description: "Relative path to the wasm package directory (e.g. bindings/wasm/notarization_wasm)" + required: false + type: string + default: "bindings/wasm/notarization_wasm" + wasm-crate-name: + description: "Name of the wasm crate (e.g. notarization_wasm)" + required: false + type: string + default: "notarization_wasm" jobs: build-wasm: defaults: @@ -52,6 +62,7 @@ jobs: sccache-enabled: true sccache-path: ${{ matrix.sccache-path }} target-cache-path: bindings/wasm/target + target-cache-key-suffix: ${{ inputs.wasm-crate-name }} # Download a pre-compiled wasm-bindgen binary. 
- name: Install wasm-bindgen-cli @@ -71,16 +82,16 @@ jobs: - name: Install JS dependencies run: npm ci - working-directory: bindings/wasm/notarization_wasm + working-directory: ${{ inputs.wasm-package-dir }} - name: Build WASM bindings run: npm run build - working-directory: bindings/wasm/notarization_wasm + working-directory: ${{ inputs.wasm-package-dir }} - name: Run Node unit tests if: ${{ inputs.run-unit-tests }} run: npm run test:unit:node - working-directory: bindings/wasm/notarization_wasm + working-directory: ${{ inputs.wasm-package-dir }} - name: Stop sccache uses: "./.github/actions/rust/sccache/stop" @@ -92,9 +103,9 @@ jobs: with: name: ${{ inputs.output-artifact-name }} path: | - bindings/wasm/notarization_wasm/node - bindings/wasm/notarization_wasm/web - bindings/wasm/notarization_wasm/examples/dist - bindings/wasm/notarization_wasm/docs + ${{ inputs.wasm-package-dir }}/node + ${{ inputs.wasm-package-dir }}/web + ${{ inputs.wasm-package-dir }}/examples/dist + ${{ inputs.wasm-package-dir }}/docs if-no-files-found: error retention-days: 1 diff --git a/.github/workflows/upload-docs.yml b/.github/workflows/upload-docs.yml index a1579e29..5c2dda6e 100644 --- a/.github/workflows/upload-docs.yml +++ b/.github/workflows/upload-docs.yml @@ -8,6 +8,9 @@ on: version: description: "Version to publish docs under (e.g. 
`v1.2.3-dev.1`)" required: true + ref: + description: "Optional git ref to checkout before building docs" + required: false env: GH_TOKEN: ${{ github.token }} @@ -16,20 +19,34 @@ permissions: actions: "write" jobs: - build-wasm: + build-wasm-notarization: uses: "./.github/workflows/shared-build-wasm.yml" with: run-unit-tests: false ref: ${{ inputs.ref }} output-artifact-name: notarization-docs + build-wasm-audit-trail: + uses: "./.github/workflows/shared-build-wasm.yml" + with: + run-unit-tests: false + ref: ${{ inputs.ref }} + output-artifact-name: audit-trail-docs + wasm-package-dir: bindings/wasm/audit_trail_wasm + wasm-crate-name: audit_trail_wasm + upload-docs: runs-on: ubuntu-latest - needs: build-wasm + needs: [build-wasm-notarization, build-wasm-audit-trail] steps: - uses: actions/download-artifact@v4 with: name: notarization-docs + path: notarization-docs + - uses: actions/download-artifact@v4 + with: + name: audit-trail-docs + path: audit-trail-docs - name: Get release version id: get_release_version run: | @@ -42,12 +59,21 @@ jobs: echo VERSION=$VERSION >> $GITHUB_OUTPUT - name: Compress generated docs run: | - tar czvf wasm.tar.gz notarization/docs/* + tar czvf wasm.tar.gz notarization-docs/docs/* + tar czvf audit-trail-wasm.tar.gz audit-trail-docs/docs/* - - name: Upload docs to AWS S3 + - name: Upload notarization docs to AWS S3 env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IOTA_WIKI }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IOTA_WIKI }} AWS_DEFAULT_REGION: "eu-central-1" run: | aws s3 cp wasm.tar.gz s3://files.iota.org/iota-wiki/iota-notarization/${{ steps.get_release_version.outputs.VERSION }}/ --acl public-read + + - name: Upload audit trail docs to AWS S3 + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IOTA_WIKI }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IOTA_WIKI }} + AWS_DEFAULT_REGION: "eu-central-1" + run: | + aws s3 cp audit-trail-wasm.tar.gz 
s3://files.iota.org/iota-wiki/iota-audit-trail/${{ steps.get_release_version.outputs.VERSION }}/ --acl public-read diff --git a/.github/workflows/wasm-publish.yml b/.github/workflows/wasm-publish.yml index 5cbee4e1..5b86d731 100644 --- a/.github/workflows/wasm-publish.yml +++ b/.github/workflows/wasm-publish.yml @@ -27,6 +27,13 @@ on: retag-tag: description: "RETAG - Tag to set" required: false + package: + description: "Which package to publish/retag" + required: true + type: choice + options: + - notarization + - audit-trail permissions: id-token: write # Required for OIDC @@ -63,8 +70,8 @@ jobs: check_vars retag_version retag_tag fi - build-wasm: - if: ${{ github.event.inputs.operation == 'publish' }} + build-wasm-notarization: + if: ${{ github.event.inputs.operation == 'publish' && github.event.inputs.package == 'notarization' }} needs: [check-inputs] uses: "./.github/workflows/shared-build-wasm.yml" with: @@ -72,10 +79,21 @@ jobs: ref: ${{ github.event.inputs.publish-branch }} output-artifact-name: notarization-wasm-bindings-build - release-wasm: - if: ${{ github.event.inputs.operation == 'publish' }} + build-wasm-audit-trail: + if: ${{ github.event.inputs.operation == 'publish' && github.event.inputs.package == 'audit-trail' }} + needs: [check-inputs] + uses: "./.github/workflows/shared-build-wasm.yml" + with: + run-unit-tests: false + ref: ${{ github.event.inputs.publish-branch }} + output-artifact-name: audit-trail-wasm-bindings-build + wasm-package-dir: bindings/wasm/audit_trail_wasm + wasm-crate-name: audit_trail_wasm + + release-wasm-notarization: + if: ${{ github.event.inputs.operation == 'publish' && github.event.inputs.package == 'notarization' }} runs-on: ubuntu-latest - needs: [build-wasm] + needs: [build-wasm-notarization] steps: - name: Checkout uses: actions/checkout@v4 @@ -89,6 +107,23 @@ jobs: working-directory: ./bindings/wasm/notarization_wasm tag: ${{ github.event.inputs.publish-tag }} + release-wasm-audit-trail: + if: ${{ 
github.event.inputs.operation == 'publish' && github.event.inputs.package == 'audit-trail' }} + runs-on: ubuntu-latest + needs: [build-wasm-audit-trail] + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.publish-branch }} + - name: Release to npm + uses: "./.github/actions/publish/wasm" + with: + dry-run: ${{ github.event.inputs.publish-dry-run }} + input-artifact-name: audit-trail-wasm-bindings-build + working-directory: ./bindings/wasm/audit_trail_wasm + tag: ${{ github.event.inputs.publish-tag }} + retag-wasm: if: ${{ github.event.inputs.operation == 'retag' }} needs: [check-inputs] @@ -100,7 +135,14 @@ jobs: node-version: "lts/*" registry-url: "https://registry.npmjs.org" - - name: Run dist-tag + - name: Run dist-tag notarization + if: ${{ github.event.inputs.package == 'notarization' }} shell: bash run: | npm dist-tag add @iota/notarization@${{ github.event.inputs.retag-version }} ${{ github.event.inputs.retag-tag }} + + - name: Run dist-tag audit-trail + if: ${{ github.event.inputs.package == 'audit-trail' }} + shell: bash + run: | + npm dist-tag add @iota/audit-trail@${{ github.event.inputs.retag-version }} ${{ github.event.inputs.retag-tag }} diff --git a/.github/workflows/wasm-retag-npm.yml b/.github/workflows/wasm-retag-npm.yml index 3830bec7..e51b3f0d 100644 --- a/.github/workflows/wasm-retag-npm.yml +++ b/.github/workflows/wasm-retag-npm.yml @@ -9,6 +9,13 @@ on: version: description: "version to set" required: true + package: + description: "Which package to retag" + required: true + type: choice + options: + - notarization + - audit-trail jobs: release-wasm: @@ -21,9 +28,18 @@ jobs: node-version: "20.x" registry-url: "https://registry.npmjs.org" - - name: Run dist-tag + - name: Run dist-tag notarization + if: ${{ github.event.inputs.package == 'notarization' }} shell: sh env: NODE_AUTH_TOKEN: ${{ secrets.NPM_NOTARIZATION_TOKEN }} run: | npm dist-tag add @iota/notarization@${{ github.event.inputs.version }} 
${{ github.event.inputs.tag }} + + - name: Run dist-tag audit-trail + if: ${{ github.event.inputs.package == 'audit-trail' }} + shell: sh + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_NOTARIZATION_TOKEN }} + run: | + npm dist-tag add @iota/audit-trail@${{ github.event.inputs.version }} ${{ github.event.inputs.tag }} diff --git a/.gitignore b/.gitignore index d29419b0..fca3c3a3 100644 --- a/.gitignore +++ b/.gitignore @@ -6,10 +6,12 @@ *.code-workspace .idea +.history .DS_Store /notarization-move/build/* /bindings/wasm/notarization_wasm/docs/* # ignore folder created in CI for downloaded iota binaries /iota/ -/toml-cli/ \ No newline at end of file +/toml-cli/ +/audit-trail-move/build \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..4f84d9d1 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,217 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Project Overview + +IOTA Notarization enables creation of immutable, on-chain records for arbitrary data by storing it (or a hash) in dedicated Move objects on the IOTA ledger. The workspace has two main subsystems: **Notarization** (creating tamper-proof records) and **Audit Trails** (structured, role-based audit logging). 
+ +## Common Commands + +### Build & Check + +```bash +cargo build --workspace --tests --examples +cargo check -p notarization-rs +cargo check -p audit-trail-rs +``` + +### Test + +```bash +# Tests must run single-threaded (IOTA sandbox requirement) +cargo test --workspace --release -- --test-threads=1 + +# Single test +cargo test --release -p notarization-rs test_name -- --test-threads=1 + +# Move contract tests (from notarization-move/ or audit-trail-move/) +iota move test +``` + +### Lint & Format + +```bash +cargo clippy --all-targets --all-features +cargo fmt --all +cargo fmt --all -- --check # check only +``` + +### WASM Bindings (in bindings/wasm/notarization_wasm/ or audit_trail_wasm/) + +```bash +npm install +npm run build +npm test # Node.js tests +npm run test:browser # Cypress browser tests +``` + +### Move Scripts + +```bash +# From notarization-move/ or audit-trail-move/ +./scripts/publish_package.sh +./scripts/notarize.sh +``` + +### Running Examples + +Examples require the relevant Move package to be published first. 
+ +**Notarization examples** — from the repo root: + +```bash +# Publish the package and capture the package ID +export IOTA_NOTARIZATION_PKG_ID=$(./notarization-move/scripts/publish_package.sh) + +# Run a specific example +cargo run --release --example +``` + +To run all notarization examples: + +```bash +# Make sure IOTA_NOTARIZATION_PKG_ID is set as shown above +./examples/run.sh +``` + +**Audit Trail examples** — from the repo root: + +```bash +# Publish the package; on localnet both vars are set to the same package ID +eval $(./audit-trail-move/scripts/publish_package.sh) + +# Run a specific example +cargo run --release --example +``` + +The `eval` form is required because the publish script prints shell `export` statements for two variables: + +- `IOTA_AUDIT_TRAIL_PKG_ID` — the audit trail package ID +- `IOTA_TF_COMPONENTS_PKG_ID` — the TfComponents package ID (equals `IOTA_AUDIT_TRAIL_PKG_ID` on localnet) + +## Developing Examples + +### Adding a new example + +1. Create the source file under `examples/notarization/` or `examples/audit-trail/`. +2. Add an `[[example]]` entry to `examples/Cargo.toml` pointing to the new file. +3. Use `examples::get_funded_notarization_client()` (notarization) or `examples::get_funded_audit_trail_client()` (audit trail) from `examples/utils/utils.rs` to obtain a funded, signed client. Do not inline client construction in example files. + +### Audit Trail example patterns + +Reference implementation: `examples/audit-trail/01_create_audit_trail.rs` + +**Client setup** — `get_funded_audit_trail_client()` reads `IOTA_AUDIT_TRAIL_PKG_ID` and `IOTA_TF_COMPONENTS_PKG_ID` from the environment and returns `AuditTrailClient`. 
+ +**Creating a trail** — use the builder returned by `client.create_trail()`: + +```rust +let created = client + .create_trail() + .with_trail_metadata(ImmutableMetadata::new("name".into(), Some("description".into()))) + .with_updatable_metadata("mutable status string") + .with_initial_record(InitialRecord::new(Data::text("content"), Some("metadata".into()), None)) + .finish() + .build_and_execute(&client) + .await? + .output; // TrailCreated { trail_id, creator, timestamp } +``` + +The creator automatically receives an Admin capability object in their wallet. + +**Defining a role** — use the trail handle's access API with the implicit Admin capability: + +```rust +client + .trail(trail_id) + .access() + .for_role("RecordAdmin") + .create(PermissionSet::record_admin_permissions(), None) + .build_and_execute(&client) + .await?; +``` + +`PermissionSet` convenience constructors: `admin_permissions()`, `record_admin_permissions()`, `locking_admin_permissions()`, `tag_admin_permissions()`, `cap_admin_permissions()`, `metadata_admin_permissions()`. + +**Issuing a capability** — mint a capability object for a role: + +```rust +let cap = client + .trail(trail_id) + .access() + .for_role("RecordAdmin") + .issue_capability(CapabilityIssueOptions::default()) + .build_and_execute(&client) + .await? + .output; // CapabilityIssued { capability_id, target_key, role, issued_to, valid_from, valid_until } +``` + +Use `CapabilityIssueOptions { issued_to, valid_from_ms, valid_until_ms }` to restrict who may use the capability or set a validity window. + +**Key types** (from `audit_trail::core::types`): `Data`, `InitialRecord`, `ImmutableMetadata`, `LockingConfig`, `LockingWindow`, `TimeLock`, `Permission`, `PermissionSet`, `CapabilityIssueOptions`, `RoleTags`. + +### Notarization example patterns + +Reference implementations: `examples/notarization/01_create_locked_notarization.rs` and `examples/notarization/02_create_dynamic_notarization.rs`. 
+ +Use `examples::get_funded_notarization_client()` to get a `NotarizationClient`. Read `audit-trail-rs/tests/e2e/` for detailed usage of every API surface. + +## Workspace Structure + +The root `Cargo.toml` defines a workspace with members: `notarization-rs`, `audit-trail-rs`, `examples`. The WASM crates (`bindings/wasm/*`) are excluded from the workspace and built separately. + +- **`notarization-rs/`** — Rust client library for notarization +- **`notarization-move/`** — Move smart contracts for notarization +- **`audit-trail-rs/`** — Rust client library for audit trails +- **`audit-trail-move/`** — Move smart contracts for audit trails +- **`bindings/wasm/notarization_wasm/`** — JS/TS WASM bindings for notarization +- **`bindings/wasm/audit_trail_wasm/`** — JS/TS WASM bindings for audit trails +- **`examples/`** — Rust examples (basic CRUD + real-world scenarios like IoT, legal contracts) + +## Architecture + +### Client Layer Pattern + +Both `notarization-rs` and `audit-trail-rs` follow the same pattern: + +- **Full client** (`NotarizationClient` / `AuditTrailClient`): Signs and submits transactions +- **Read-only client** (`NotarizationClientReadOnly` / `AuditTrailClientReadOnly`): Read-only state inspection +- Clients wrap a `product_common` transaction builder that supports `.build()`, `.build_and_execute()`, and `.execute_with_gas_station()` + +### Builder Pattern (Type-State) + +Notarization creation uses a `NotarizationBuilder` with phantom type states to enforce valid configurations at compile time. Separate builder paths exist for **Dynamic** (mutable, transferable) vs **Locked** (immutable, non-transferable) notarizations. 
+ +### Method Types + +- **Dynamic**: State and metadata are updatable after creation; supports transfer locks +- **Locked**: State and metadata are immutable; supports time-based destruction + +### Lock System + +- **Transfer locks**: `None`, `UnlockAt(epoch)`, `UntilDestroyed` +- **Delete locks**: Restrict when a notarization can be destroyed + +### Cross-Platform Compilation + +Code uses `#[cfg(target_arch = "wasm32")]` guards to conditionally compile for WASM. Features `send-sync`, `gas-station`, `default-http-client`, and `irl` control optional capabilities. + +### Key External Dependencies + +- `iota-sdk` (v1.19.1, from IOTA git) — on-chain interaction +- `iota_interaction` / `iota_interaction_rust` / `iota_interaction_ts` — from `product-core` repo, `feat/tf-compoenents-dev` branch +- `product_common` — transaction builder abstraction from `product-core` +- `secret-storage` (v0.3.0) — key management + +## Testing Requirements + +- Tests require an IOTA sandbox running locally +- Always use `--test-threads=1` (tests share sandbox state) +- Notarization examples require `IOTA_NOTARIZATION_PKG_ID` environment variable set to the deployed package ID +- Audit trail examples require `IOTA_AUDIT_TRAIL_PKG_ID` (and `IOTA_TF_COMPONENTS_PKG_ID` on localnet) — use `eval $(./audit-trail-move/scripts/publish_package.sh)` to set both +- WASM browser tests use Cypress + +## Rust Version + +Minimum: **1.85**, Edition: **2024** diff --git a/Cargo.toml b/Cargo.toml index 6ac6edbd..e507745c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,28 +8,28 @@ rust-version = "1.85" [workspace] resolver = "2" -members = ["examples", "notarization-rs"] -exclude = ["bindings/wasm/notarization_wasm"] +members = ["audit-trail-rs", "examples", "notarization-rs"] +exclude = ["bindings/wasm/notarization_wasm", "bindings/wasm/audit_trail_wasm"] [workspace.dependencies] anyhow = "1.0" async-trait = "0.1" bcs = "0.1" +chrono = { version = "0.4", default-features = false } hyper = "1" -iota-sdk = { 
git = "https://github.com/iotaledger/iota.git", package = "iota-sdk", tag = "v1.20.1" } -iota_interaction = { git = "https://github.com/iotaledger/product-core.git", tag = "v0.8.15", default-features = false, package = "iota_interaction" } -iota_interaction_rust = { git = "https://github.com/iotaledger/product-core.git", tag = "v0.8.15", default-features = false, package = "iota_interaction_rust" } -iota_interaction_ts = { git = "https://github.com/iotaledger/product-core.git", tag = "v0.8.15", default-features = false, package = "iota_interaction_ts" } -product_common = { git = "https://github.com/iotaledger/product-core.git", tag = "v0.8.15", default-features = false, package = "product_common" } +iota-sdk = { git = "https://github.com/iotaledger/iota.git", package = "iota-sdk", tag = "v1.19.1" } +iota_interaction = { git = "https://github.com/iotaledger/product-core.git", branch = "feat/tf-compoenents-dev", default-features = false, package = "iota_interaction" } +iota_interaction_rust = { git = "https://github.com/iotaledger/product-core.git", branch = "feat/tf-compoenents-dev", default-features = false, package = "iota_interaction_rust" } +iota_interaction_ts = { git = "https://github.com/iotaledger/product-core.git", branch = "feat/tf-compoenents-dev", default-features = false, package = "iota_interaction_ts" } +product_common = { git = "https://github.com/iotaledger/product-core.git", branch = "feat/tf-compoenents-dev", default-features = false, package = "product_common" } +secret-storage = { git = "https://github.com/iotaledger/secret-storage.git", tag = "v0.3.0", default-features = false } serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } +serde-aux = { version = "4.7.0", default-features = false } serde_json = { version = "1.0", default-features = false } +sha2 = { version = "0.10", default-features = false } strum = { version = "0.27", default-features = false, features = ["std", "derive"] } thiserror = { version = 
"2.0", default-features = false } - -chrono = { version = "0.4", default-features = false } -secret-storage = { git = "https://github.com/iotaledger/secret-storage.git", tag = "v0.3.0", default-features = false } -sha2 = { version = "0.10", default-features = false } -tokio = { version = "1.49.0", default-features = false, features = ["macros", "sync", "rt", "process"] } +tokio = { version = "1.46.1", default-features = false, features = ["macros", "sync", "rt", "process"] } [profile.release.package.iota_interaction_ts] opt-level = 's' diff --git a/README.md b/README.md index 0efb11a3..b4593d6f 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,8 @@

Introduction ◈ + Where To Start ◈ + ToolkitsDocumentation & ResourcesBindingsContributing @@ -16,41 +18,105 @@ --- -# IOTA Notarization +# IOTA Notarization And Audit Trail ## Introduction -IOTA Notarization enables the creation of immutable, on-chain records for any arbitrary data. This is achieved by storing the data, or a hash of it, inside a dedicated Move object on the IOTA ledger. This process provides a verifiable, timestamped proof of the data's existence and integrity at a specific point in time. +This repository contains two complementary IOTA ledger toolkits for verifiable on-chain data workflows: -IOTA Notarization is composed of two primary components: +- **IOTA Notarization** + Best when you want a proof object for arbitrary data, documents, hashes, or latest-state notarization flows. +- **IOTA Audit Trail** + Best when you want shared audit records with sequential entries, role-based access control, locking, and tagging. -- **Notarization Move Package**: The on-chain smart contracts that define the behavior and structure of notarization objects. -- **Notarization Library (Rust/Wasm)**: A client-side library that provides developers with convenient functions to create, manage, and verify `Notarization` objects on the network. +Each toolkit is available as: -## Documentation and Resources +- a **Move package** for the on-chain contracts +- a **Rust SDK** for typed client access and transaction builders +- **wasm bindings** for JavaScript and TypeScript integrations -- [Notarization Documentation Pages](https://docs.iota.org/developer/iota-notarization): Supplementing documentation with context around notarization and simple examples on library usage. -- API References: - - [Rust API Reference](https://iotaledger.github.io/notarization/notarization/index.html): Package documentation (cargo docs). 
+## Where To Start - +### I want to notarize data -- Examples: - - [Rust Examples](https://github.com/iotaledger/notarization/tree/main/examples/README.md): Practical code snippets to get you started with the library in Rust. - - [Wasm Examples](https://github.com/iotaledger/notarization/tree/main/bindings/wasm/notarization_wasm/examples/README.md): Practical code snippets to get you started with the library in TypeScript/JavaScript. +Use **IOTA Notarization** when your main need is proving the existence, integrity, or latest state of data on-chain. + +- [Notarization Rust SDK](./notarization-rs) +- [Notarization Move Package](./notarization-move) +- [Notarization Wasm SDK](./bindings/wasm/notarization_wasm) +- [Notarization examples](./bindings/wasm/notarization_wasm/examples/README.md) + +### I want audit records + +Use **IOTA Audit Trail** when you need shared audit records with permissions, capabilities, tagging, and write or delete controls. + +- [Audit Trail Rust SDK](./audit-trail-rs) +- [Audit Trail Move Package](./audit-trail-move) +- [Audit Trail Wasm SDK](./bindings/wasm/audit_trail_wasm) +- [Audit Trail examples](./bindings/wasm/audit_trail_wasm/examples/README.md) + +### I want the on-chain contracts + +- [Notarization Move](./notarization-move) +- [Audit Trail Move](./audit-trail-move) + +### I want application SDKs + +- [Notarization Rust](./notarization-rs) +- [Audit Trail Rust](./audit-trail-rs) +- [Notarization Wasm](./bindings/wasm/notarization_wasm) +- [Audit Trail Wasm](./bindings/wasm/audit_trail_wasm) + +## Toolkits + +| Toolkit | Best for | Move Package | Rust SDK | Wasm SDK | +| ------------ | ------------------------------------------------------------------------ | ------------------------------------------ | -------------------------------------- | -------------------------------------------------------- | +| Notarization | Proof objects for documents, hashes, and updatable notarized state | [`notarization-move`](./notarization-move) | 
[`notarization-rs`](./notarization-rs) | [`notarization_wasm`](./bindings/wasm/notarization_wasm) | +| Audit Trail | Shared sequential records with roles, capabilities, tagging, and locking | [`audit-trail-move`](./audit-trail-move) | [`audit-trail-rs`](./audit-trail-rs) | [`audit_trail_wasm`](./bindings/wasm/audit_trail_wasm) | + +### Which one should I use? + +| Need | Best fit | +| ------------------------------------------------------------------------- | ------------ | +| Immutable or updatable proof object for arbitrary data | Notarization | +| Simple proof-of-existence or latest-state notarization flow | Notarization | +| Shared sequential records with roles, capabilities, and record tag policy | Audit Trail | +| Team or system audit log with governance and operational controls | Audit Trail | + +## Documentation And Resources + +### IOTA Notarization + +- [Notarization Rust SDK README](./notarization-rs/README.md) +- [Notarization Move Package README](./notarization-move/README.md) +- [Notarization Wasm README](./bindings/wasm/notarization_wasm/README.md) +- [Notarization examples](./bindings/wasm/notarization_wasm/examples/README.md) +- [IOTA Notarization Docs Portal](https://docs.iota.org/developer/iota-notarization) + +### IOTA Audit Trail + +- [Audit Trail Rust SDK README](./audit-trail-rs/README.md) +- [Audit Trail Move Package README](./audit-trail-move/README.md) +- [Audit Trail Wasm README](./bindings/wasm/audit_trail_wasm/README.md) +- [Audit Trail examples](./bindings/wasm/audit_trail_wasm/examples/README.md) + +### Shared + +- [Repository examples](./examples/README.md) ## Bindings -[Foreign Function Interface (FFI)](https://en.wikipedia.org/wiki/Foreign_function_interface) Bindings of this [Rust](https://www.rust-lang.org/) library to other programming languages: +[Foreign Function Interface (FFI)](https://en.wikipedia.org/wiki/Foreign_function_interface) bindings available in this repository: -- [Web 
Assembly](https://github.com/iotaledger/notarization/tree/main/bindings/wasm/notarization_wasm) (JavaScript/TypeScript) +- [Web Assembly for IOTA Notarization](./bindings/wasm/notarization_wasm) +- [Web Assembly for IOTA Audit Trail](./bindings/wasm/audit_trail_wasm) ## Contributing -We would love to have you help us with the development of IOTA Notarization. Each and every contribution is greatly valued! +We would love to have you help us with the development of IOTA Notarization and Audit Trail. Each and every contribution is greatly valued. Please review the [contribution](https://docs.iota.org/developer/iota-notarization/contribute) sections in the [IOTA Docs Portal](https://docs.iota.org/developer/iota-notarization/). -To contribute directly to the repository, simply fork the project, push your changes to your fork and create a pull request to get them included! +To contribute directly to the repository, simply fork the project, push your changes to your fork and create a pull request to get them included. -The best place to get involved in discussions about this library or to look for support at is the `#notarization` channel on the [IOTA Discord](https://discord.gg/iota-builders). You can also ask questions on our [Stack Exchange](https://iota.stackexchange.com/). +The best place to get involved in discussions about these libraries or to look for support is the `#notarization` channel on the [IOTA Discord](https://discord.gg/iota-builders). You can also ask questions on our [Stack Exchange](https://iota.stackexchange.com/). 
diff --git a/audit-trail-move/.prettierignore b/audit-trail-move/.prettierignore new file mode 100644 index 00000000..a007feab --- /dev/null +++ b/audit-trail-move/.prettierignore @@ -0,0 +1 @@ +build/* diff --git a/audit-trail-move/.prettierrc b/audit-trail-move/.prettierrc new file mode 100644 index 00000000..0ceb3060 --- /dev/null +++ b/audit-trail-move/.prettierrc @@ -0,0 +1,8 @@ +{ + "tabWidth": 4, + "printWidth": 100, + "useModuleLabel": true, + "autoGroupImports": "package", + "enableErrorDebug": false, + "wrapComments": false +} \ No newline at end of file diff --git a/audit-trail-move/Move.history.json b/audit-trail-move/Move.history.json new file mode 100644 index 00000000..04e94921 --- /dev/null +++ b/audit-trail-move/Move.history.json @@ -0,0 +1,10 @@ +{ + "aliases": { + "testnet": "2304aa97" + }, + "envs": { + "2304aa97": [ + "0x7655d346145e2ba7fcb6a5c63b4b9ec18a92c435364206e5c3f3dfd8cb95d98d" + ] + } +} \ No newline at end of file diff --git a/audit-trail-move/Move.lock b/audit-trail-move/Move.lock new file mode 100644 index 00000000..b36d8ec9 --- /dev/null +++ b/audit-trail-move/Move.lock @@ -0,0 +1,73 @@ +# @generated by Move, please check-in and do not edit manually. 
+ +[move] +version = 3 +manifest_digest = "EBCB35B368C39FD9E190F502DC6A07A51CF87960E25EC4439DCBF9FBA8307B3C" +deps_digest = "397E6A9F7A624706DBDFEE056CE88391A15876868FD18A88504DA74EB458D697" +dependencies = [ + { id = "Iota", name = "Iota" }, + { id = "IotaSystem", name = "IotaSystem" }, + { id = "MoveStdlib", name = "MoveStdlib" }, + { id = "Stardust", name = "Stardust" }, + { id = "TfComponents", name = "TfComponents" }, +] + +[[move.package]] +id = "Iota" +source = { git = "https://github.com/iotaledger/iota.git", rev = "b1b37ed9d5ff64cbbfb3aa1ebd9b9431a0337311", subdir = "crates/iota-framework/packages/iota-framework" } + +dependencies = [ + { id = "MoveStdlib", name = "MoveStdlib" }, +] + +[[move.package]] +id = "IotaSystem" +source = { git = "https://github.com/iotaledger/iota.git", rev = "b1b37ed9d5ff64cbbfb3aa1ebd9b9431a0337311", subdir = "crates/iota-framework/packages/iota-system" } + +dependencies = [ + { id = "Iota", name = "Iota" }, + { id = "MoveStdlib", name = "MoveStdlib" }, +] + +[[move.package]] +id = "MoveStdlib" +source = { git = "https://github.com/iotaledger/iota.git", rev = "b1b37ed9d5ff64cbbfb3aa1ebd9b9431a0337311", subdir = "crates/iota-framework/packages/move-stdlib" } + +[[move.package]] +id = "Stardust" +source = { git = "https://github.com/iotaledger/iota.git", rev = "b1b37ed9d5ff64cbbfb3aa1ebd9b9431a0337311", subdir = "crates/iota-framework/packages/stardust" } + +dependencies = [ + { id = "Iota", name = "Iota" }, + { id = "MoveStdlib", name = "MoveStdlib" }, +] + +[[move.package]] +id = "TfComponents" +source = { git = "https://github.com/iotaledger/product-core.git", rev = "feat/tf-compoenents-dev", subdir = "components_move" } + +dependencies = [ + { id = "Iota", name = "Iota" }, + { id = "IotaSystem", name = "IotaSystem" }, + { id = "MoveStdlib", name = "MoveStdlib" }, + { id = "Stardust", name = "Stardust" }, +] + +[move.toolchain-version] +compiler-version = "1.20.0-rc" +edition = "2024.beta" +flavor = "iota" + +[env] + 
+[env.localnet] +chain-id = "417321d4" +original-published-id = "0x70e6f4f0ba0d3fae15288bf254b34829ad3c122b7f73766b13233af6acaeb715" +latest-published-id = "0x70e6f4f0ba0d3fae15288bf254b34829ad3c122b7f73766b13233af6acaeb715" +published-version = "1" + +[env.testnet] +chain-id = "2304aa97" +original-published-id = "0x7655d346145e2ba7fcb6a5c63b4b9ec18a92c435364206e5c3f3dfd8cb95d98d" +latest-published-id = "0x7655d346145e2ba7fcb6a5c63b4b9ec18a92c435364206e5c3f3dfd8cb95d98d" +published-version = "1" diff --git a/audit-trail-move/Move.toml b/audit-trail-move/Move.toml new file mode 100644 index 00000000..9dfad22c --- /dev/null +++ b/audit-trail-move/Move.toml @@ -0,0 +1,9 @@ +[package] +name = "IotaAuditTrail" +edition = "2024.beta" + +[dependencies] +TfComponents = { git = "https://github.com/iotaledger/product-core.git", subdir = "components_move", rev = "feat/tf-compoenents-dev" } + +[addresses] +audit_trail = "0x0" diff --git a/audit-trail-move/README.md b/audit-trail-move/README.md new file mode 100644 index 00000000..41ab23fd --- /dev/null +++ b/audit-trail-move/README.md @@ -0,0 +1,90 @@ +![banner](https://github.com/iotaledger/notarization/raw/HEAD/.github/banner_notarization.png) + +

+ StackExchange + Discord + Apache 2.0 license +

+ +

+ Introduction ◈ + Modules ◈ + Development & Testing ◈ + Related Libraries ◈ + Contributing +

+ +--- + +# IOTA Audit Trail Move Package + +## Introduction + +`audit-trail-move` is the on-chain Move package behind IOTA Audit Trail. + +It defines the shared `AuditTrail` object and the supporting types needed for: + +- sequential record storage +- role-based access control through capabilities +- trail-wide locking for writes and deletions +- record tags and role tag restrictions +- immutable and updatable trail metadata +- emitted events for trail and record lifecycle changes + +The package depends on `TfComponents` for reusable capability, role-map, and timelock primitives. + +## Modules + +- `audit_trail::main` + Core shared object, events, trail lifecycle, record mutation, metadata updates, roles, and capabilities. +- `audit_trail::record` + Record payloads, initial records, and correction metadata. +- `audit_trail::locking` + Locking configuration and lock evaluation helpers. +- `audit_trail::permission` + Permission constructors and admin permission presets. +- `audit_trail::record_tags` + Tag registry and role tag helpers. + +## Development And Testing + +Build the Move package: + +```bash +cd audit-trail-move +iota move build +``` + +Run the Move test suite: + +```bash +cd audit-trail-move +iota move test +``` + +Publish locally: + +```bash +cd audit-trail-move +./scripts/publish_package.sh +``` + +The publish script prints `IOTA_AUDIT_TRAIL_PKG_ID` and, on `localnet`, also exports `IOTA_TF_COMPONENTS_PKG_ID`. + +The package history files [`Move.lock`](./Move.lock) and [`Move.history.json`](./Move.history.json) are used by the Rust SDK to resolve and track deployed package versions. 
+ +## Related Libraries + +- [Rust SDK](https://github.com/iotaledger/notarization/tree/main/audit-trail-rs/README.md) +- [Wasm SDK](https://github.com/iotaledger/notarization/tree/main/bindings/wasm/audit_trail_wasm/README.md) +- [Repository Root](https://github.com/iotaledger/notarization/tree/main/README.md) + +## Contributing + +We would love to have you help us with the development of IOTA Audit Trail. Each and every contribution is greatly valued. + +Please review the [contribution](https://docs.iota.org/developer/iota-notarization/contribute) sections in the [IOTA Docs Portal](https://docs.iota.org/developer/iota-notarization/). + +To contribute directly to the repository, simply fork the project, push your changes to your fork and create a pull request to get them included. + +The best place to get involved in discussions about this package or to look for support is the `#notarization` channel on the [IOTA Discord](https://discord.gg/iota-builders). You can also ask questions on our [Stack Exchange](https://iota.stackexchange.com/). diff --git a/audit-trail-move/scripts/publish_package.sh b/audit-trail-move/scripts/publish_package.sh new file mode 100755 index 00000000..d29a0e3c --- /dev/null +++ b/audit-trail-move/scripts/publish_package.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Copyright 2020-2026 IOTA Stiftung +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +script_dir=$(cd "$(dirname "$0")" && pwd) +package_dir="$script_dir/.." 
+ +active_env=$(iota client active-env --json | jq -r '.') + +publish_args=( + iota client publish + --silence-warnings + --json + --gas-budget 500000000 +) + +if [[ "$active_env" == "localnet" ]]; then + publish_args+=(--with-unpublished-dependencies) +fi + +response=$("${publish_args[@]}" "$package_dir") + +audit_trail_package_id=$( + echo "$response" | jq -r ' + .objectChanges[] + | select(.type == "published") + | .packageId + ' +) + +if [[ -z "$audit_trail_package_id" || "$audit_trail_package_id" == "null" ]]; then + echo "$response" >&2 + echo "failed to extract audit_trail package ID from publish response" >&2 + exit 1 +fi + +export IOTA_AUDIT_TRAIL_PKG_ID="$audit_trail_package_id" +printf 'export IOTA_AUDIT_TRAIL_PKG_ID=%s\n' "$IOTA_AUDIT_TRAIL_PKG_ID" + +if [[ "$active_env" == "localnet" ]]; then + tf_components_package_id="$audit_trail_package_id" + + export IOTA_TF_COMPONENTS_PKG_ID="$tf_components_package_id" + printf 'export IOTA_TF_COMPONENTS_PKG_ID=%s\n' "$IOTA_TF_COMPONENTS_PKG_ID" +fi diff --git a/audit-trail-move/sources/audit_trail.move b/audit-trail-move/sources/audit_trail.move new file mode 100644 index 00000000..511e2b86 --- /dev/null +++ b/audit-trail-move/sources/audit_trail.move @@ -0,0 +1,1004 @@ +// Copyright (c) 2025 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/// Audit Trail with role-based access control and timelock +/// A trail is a tamper-proof, sequential chain of notarized records where each +/// entry references its predecessor, ensuring verifiable continuity and +/// integrity. 
+module audit_trail::main; + +use audit_trail::{ + locking::{ + Self, + LockingConfig, + LockingWindow, + set_config, + set_delete_record_window, + set_delete_trail_lock, + set_write_lock + }, + permission::{Self, Permission}, + record::{Self, Record, InitialRecord}, + record_tags::{Self, RoleTags, TagRegistry} +}; +use iota::{clock::{Self, Clock}, event, linked_table::{Self, LinkedTable}, vec_set::VecSet}; +use std::string::String; +use tf_components::{capability::Capability, role_map::{Self, RoleMap}, timelock::TimeLock}; + +// ===== Errors ===== +#[error] +const ERecordNotFound: vector = b"Record not found at the given sequence number"; +#[error] +const ERecordLocked: vector = b"The record is locked and cannot be deleted"; +#[error] +const ETrailNotEmpty: vector = b"Audit trail cannot be deleted while records still exist"; +#[error] +const ETrailDeleteLocked: vector = b"The audit trail is delete-locked"; +#[error] +const ETrailWriteLocked: vector = b"The audit trail is write-locked"; +#[error] +const EPackageVersionMismatch: vector = + b"The package version of the trail does not match the expected version"; +#[error] +const ERecordTagNotAllowed: vector = + b"The provided capability cannot create records with the requested tag"; +#[error] +const ERecordTagNotDefined: vector = b"The requested tag is not defined for this audit trail"; +#[error] +const ERecordTagAlreadyDefined: vector = + b"The requested tag is already defined for this audit trail"; +#[error] +const ERecordTagInUse: vector = + b"The requested tag cannot be removed because it is already used by an existing record or role"; +// ===== Constants ===== +const INITIAL_ADMIN_ROLE_NAME: vector = b"Admin"; + +// Package version, incremented when the package is updated +const PACKAGE_VERSION: u64 = 1; + +// ===== Core Structures ===== + +/// Metadata set at trail creation +public struct ImmutableMetadata has copy, drop, store { + name: String, + description: Option, +} + +/// A shared, tamper-evident ledger 
for storing sequential records with +/// role-based access control. +/// +/// It maintains an ordered sequence of records, each assigned a unique +/// auto-incrementing sequence number. +/// Uses capability-based RBAC to manage access to the trail and its records. +public struct AuditTrail has key { + id: UID, + /// Address that created this trail + creator: address, + /// Creation timestamp in milliseconds + created_at: u64, + /// Monotonic counter for sequence assignment (never decrements) + sequence_number: u64, + /// LinkedTable mapping sequence numbers to records + records: LinkedTable>, + /// Canonical list of tags that may be attached to records in this trail with their combined usage counts + tags: TagRegistry, + /// Deletion locking rules + locking_config: LockingConfig, + /// A list of role definitions consisting of a unique role specifier and a list of associated permissions + roles: RoleMap, + /// Set at creation, cannot be changed + immutable_metadata: Option, + /// Can be updated by holders of MetadataUpdate permission + updatable_metadata: Option, + /// Package version + version: u64, +} + +// ===== Events ===== + +/// Emitted when a new trail is created +public struct AuditTrailCreated has copy, drop { + trail_id: ID, + creator: address, + timestamp: u64, +} + +/// Emitted when the audit trail is deleted +public struct AuditTrailDeleted has copy, drop { + trail_id: ID, + timestamp: u64, +} + +/// Emitted when a record is added to the trail +public struct RecordAdded has copy, drop { + trail_id: ID, + sequence_number: u64, + added_by: address, + timestamp: u64, +} + +/// Emitted when a record is deleted from the trail +public struct RecordDeleted has copy, drop { + trail_id: ID, + sequence_number: u64, + deleted_by: address, + timestamp: u64, +} + +// ===== Constructors ===== + +/// Create immutable trail metadata +public fun new_trail_metadata(name: String, description: Option): ImmutableMetadata { + ImmutableMetadata { name, description } +} + +// 
===== Trail Creation ===== + +/// Create a new audit trail with optional initial record +/// +/// Initial roles config +/// -------------------- +/// Initializes the `roles` map with only one role, called "Admin" which is associated with the permissions +/// * TrailDelete +/// * CapabilitiesAdd +/// * CapabilitiesRevoke +/// * RolesAdd +/// * RolesUpdate +/// * RolesDelete +/// +/// Returns +/// ------- +/// * Capability with *Admin* role, allowing the creator to define custom +/// roles and issue capabilities to other users. +/// * Trail ID +public fun create( + initial_record: Option>, + locking_config: LockingConfig, + trail_metadata: Option, + updatable_metadata: Option, + tags: vector, + clock: &Clock, + ctx: &mut TxContext, +): (Capability, ID) { + let creator = ctx.sender(); + let timestamp = clock::timestamp_ms(clock); + + let trail_uid = object::new(ctx); + let trail_id = object::uid_to_inner(&trail_uid); + let mut tags = record_tags::new_tag_registry(tags); + + let mut records = linked_table::new>(ctx); + let mut sequence_number = 0; + + if (initial_record.is_some()) { + let record = record::into_record( + initial_record.destroy_some(), + 0, + creator, + timestamp, + ); + + if (record::tag(&record).is_some()) { + let initial_tag = option::borrow(record::tag(&record)); + assert!(record_tags::contains(&tags, initial_tag), ERecordTagNotDefined); + record_tags::increment_usage_count(&mut tags, initial_tag); + }; + + linked_table::push_back(&mut records, 0, record); + sequence_number = 1; + } else { + initial_record.destroy_none(); + }; + + let role_admin_permissions = role_map::new_role_admin_permissions( + permission::add_roles(), + permission::delete_roles(), + permission::update_roles(), + ); + + let capability_admin_permissions = role_map::new_capability_admin_permissions( + permission::add_capabilities(), + permission::revoke_capabilities(), + ); + + let (roles, admin_cap) = role_map::new( + trail_id, + initial_admin_role_name(), + 
permission::admin_permissions(), + role_admin_permissions, + capability_admin_permissions, + ctx, + ); + + let trail = AuditTrail { + id: trail_uid, + creator, + created_at: timestamp, + sequence_number, + records, + tags, + locking_config, + roles, + immutable_metadata: trail_metadata, + updatable_metadata, + version: PACKAGE_VERSION, + }; + + transfer::share_object(trail); + + event::emit(AuditTrailCreated { + trail_id, + creator, + timestamp, + }); + + (admin_cap, trail_id) +} + +public fun initial_admin_role_name(): String { + INITIAL_ADMIN_ROLE_NAME.to_string() +} + +/// Migrate the trail to the latest package version +entry fun migrate( + self: &mut AuditTrail, + cap: &Capability, + clock: &Clock, + ctx: &TxContext, +) { + assert!(self.version < PACKAGE_VERSION, EPackageVersionMismatch); + self + .roles + .assert_capability_valid( + cap, + &permission::migrate_audit_trail(), + clock, + ctx, + ); + self.version = PACKAGE_VERSION; +} + +fun assert_record_tag_allowed( + self: &AuditTrail, + cap: &Capability, + tag: &Option, +) { + if (tag.is_none()) { + return + }; + + let requested_tag = option::borrow(tag); + assert!(record_tags::contains(&self.tags, requested_tag), ERecordTagNotDefined); + assert!(record_tags::role_allows(&self.roles, cap, requested_tag), ERecordTagNotAllowed); +} + +// ===== Record Operations ===== + +/// Add a record to the trail +/// +/// Records are added sequentially with auto-assigned sequence numbers. 
+public fun add_record( + self: &mut AuditTrail, + cap: &Capability, + stored_data: D, + record_metadata: Option, + record_tag: Option, + clock: &Clock, + ctx: &mut TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + self + .roles + .assert_capability_valid( + cap, + &permission::add_record(), + clock, + ctx, + ); + assert!(!locking::is_write_locked(&self.locking_config, clock), ETrailWriteLocked); + assert_record_tag_allowed(self, cap, &record_tag); + + let caller = ctx.sender(); + let timestamp = clock::timestamp_ms(clock); + let trail_id = self.id(); + let seq = self.sequence_number; + + if (record_tag.is_some()) { + record_tags::increment_usage_count(&mut self.tags, option::borrow(&record_tag)); + }; + + let record = record::new( + stored_data, + record_metadata, + record_tag, + seq, + caller, + timestamp, + record::empty(), + ); + + linked_table::push_back(&mut self.records, seq, record); + self.sequence_number = self.sequence_number + 1; + + event::emit(RecordAdded { + trail_id, + sequence_number: seq, + added_by: caller, + timestamp, + }); +} + +/// Delete a record from the trail by sequence number +/// +/// The record must not be locked (based on the trail's locking configuration). +/// Requires the DeleteRecord permission. 
+public fun delete_record( + self: &mut AuditTrail, + cap: &Capability, + sequence_number: u64, + clock: &Clock, + ctx: &mut TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + self + .roles + .assert_capability_valid( + cap, + &permission::delete_record(), + clock, + ctx, + ); + assert!(linked_table::contains(&self.records, sequence_number), ERecordNotFound); + assert_record_tag_allowed( + self, + cap, + record::tag(linked_table::borrow(&self.records, sequence_number)), + ); + assert!(!self.is_record_locked(sequence_number, clock), ERecordLocked); + + let caller = ctx.sender(); + let timestamp = clock::timestamp_ms(clock); + let trail_id = self.id(); + + let record = linked_table::remove(&mut self.records, sequence_number); + if (record::tag(&record).is_some()) { + record_tags::decrement_usage_count(&mut self.tags, option::borrow(record::tag(&record))); + }; + record::destroy(record); + + event::emit(RecordDeleted { + trail_id, + sequence_number, + deleted_by: caller, + timestamp, + }); +} + +/// Delete up to `limit` records from the front of the trail. +/// +/// Requires `DeleteAllRecords` permission. This operation bypasses record locks. +/// Returns the number of records deleted in this batch. 
+public fun delete_records_batch( + self: &mut AuditTrail, + cap: &Capability, + limit: u64, + clock: &Clock, + ctx: &mut TxContext, +): u64 { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + self + .roles + .assert_capability_valid( + cap, + &permission::delete_all_records(), + clock, + ctx, + ); + + let mut deleted = 0; + let caller = ctx.sender(); + let timestamp = clock.timestamp_ms(); + let trail_id = self.id(); + + while (deleted < limit && !self.records.is_empty()) { + let next_sequence_number = option::destroy_some(*linked_table::front(&self.records)); + assert_record_tag_allowed( + self, + cap, + record::tag(linked_table::borrow(&self.records, next_sequence_number)), + ); + let (sequence_number, record) = self.records.pop_front(); + + if (record::tag(&record).is_some()) { + record_tags::decrement_usage_count( + &mut self.tags, + option::borrow(record::tag(&record)), + ); + }; + + record.destroy(); + + event::emit(RecordDeleted { + trail_id, + sequence_number, + deleted_by: caller, + timestamp, + }); + + deleted = deleted + 1; + }; + + deleted +} + +/// Delete an empty audit trail. +/// +/// Requires `DeleteAuditTrail` permission and aborts if records still exist. 
+public fun delete_audit_trail( + self: AuditTrail, + cap: &Capability, + clock: &Clock, + ctx: &mut TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + self + .roles + .assert_capability_valid( + cap, + &permission::delete_audit_trail(), + clock, + ctx, + ); + assert!(!locking::is_delete_trail_locked(&self.locking_config, clock), ETrailDeleteLocked); + assert!(linked_table::is_empty(&self.records), ETrailNotEmpty); + + let trail_id = self.id(); + let timestamp = clock::timestamp_ms(clock); + + let AuditTrail { + id, + creator: _, + created_at: _, + sequence_number: _, + records, + tags, + locking_config: _, + roles, + immutable_metadata: _, + updatable_metadata: _, + version: _, + } = self; + + roles.destroy(); + linked_table::destroy_empty(records); + tags.destroy(); + + object::delete(id); + + event::emit(AuditTrailDeleted { trail_id, timestamp }); +} + +// ===== Locking ===== + +/// Check if a record is locked based on the trail's locking configuration. +/// Aborts with ERecordNotFound if the record doesn't exist. +public fun is_record_locked( + self: &AuditTrail, + sequence_number: u64, + clock: &Clock, +): bool { + assert!(linked_table::contains(&self.records, sequence_number), ERecordNotFound); + + let record = linked_table::borrow(&self.records, sequence_number); + let current_time = clock::timestamp_ms(clock); + + locking::is_delete_record_locked( + &self.locking_config, + sequence_number, + record::added_at(record), + self.sequence_number, + current_time, + ) +} + +/// Update the locking configuration. Requires `UpdateLockingConfig` permission. 
+public fun update_locking_config( + self: &mut AuditTrail, + cap: &Capability, + new_config: LockingConfig, + clock: &Clock, + ctx: &mut TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + self + .roles + .assert_capability_valid( + cap, + &permission::update_locking_config(), + clock, + ctx, + ); + set_config(&mut self.locking_config, new_config); +} + +/// Update the `delete_record_lock` locking configuration +public fun update_delete_record_window( + self: &mut AuditTrail, + cap: &Capability, + new_delete_record_lock: LockingWindow, + clock: &Clock, + ctx: &mut TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + self + .roles + .assert_capability_valid( + cap, + &permission::update_locking_config_for_delete_record(), + clock, + ctx, + ); + set_delete_record_window(&mut self.locking_config, new_delete_record_lock); +} + +/// Update the `delete_trail_lock` locking configuration. +public fun update_delete_trail_lock( + self: &mut AuditTrail, + cap: &Capability, + new_delete_trail_lock: TimeLock, + clock: &Clock, + ctx: &mut TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + self + .roles + .assert_capability_valid( + cap, + &permission::update_locking_config_for_delete_trail(), + clock, + ctx, + ); + set_delete_trail_lock(&mut self.locking_config, new_delete_trail_lock); +} + +/// Update the `write_lock` locking configuration. 
+public fun update_write_lock( + self: &mut AuditTrail, + cap: &Capability, + new_write_lock: TimeLock, + clock: &Clock, + ctx: &mut TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + self + .roles + .assert_capability_valid( + cap, + &permission::update_locking_config_for_write(), + clock, + ctx, + ); + set_write_lock(&mut self.locking_config, new_write_lock); +} + +/// Update the trail's mutable metadata +public fun update_metadata( + self: &mut AuditTrail, + cap: &Capability, + new_metadata: Option, + clock: &Clock, + ctx: &mut TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + self + .roles + .assert_capability_valid( + cap, + &permission::update_metadata(), + clock, + ctx, + ); + self.updatable_metadata = new_metadata; +} + +/// Adds a new record tag to the trail registry. +public fun add_record_tag( + self: &mut AuditTrail, + cap: &Capability, + tag: String, + clock: &Clock, + ctx: &TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + + self.roles.assert_capability_valid(cap, &permission::add_record_tags(), clock, ctx); + + assert!(!self.tags.contains(&tag), ERecordTagAlreadyDefined); + self.tags.insert_tag(tag, 0); +} + +/// Removes a record tag from the trail registry if it is not used by any record. +public fun remove_record_tag( + self: &mut AuditTrail, + cap: &Capability, + tag: String, + clock: &Clock, + ctx: &TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + + self.roles.assert_capability_valid(cap, &permission::delete_record_tags(), clock, ctx); + + assert!(self.tags.contains(&tag), ERecordTagNotDefined); + assert!(!self.tags.is_in_use(&tag), ERecordTagInUse); + + self.tags.remove_tag(&tag); +} + +// ===== Role and Capability Administration ===== + +/// Creates a new role with the provided permissions. 
+public fun create_role( + self: &mut AuditTrail, + cap: &Capability, + role: String, + permissions: VecSet, + role_tags: Option, + clock: &Clock, + ctx: &mut TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + + assert!(self.tags.contains_all_role_tags(&role_tags), ERecordTagNotDefined); + + role_map::create_role( + self.access_mut(), + cap, + role, + permissions, + copy role_tags, + clock, + ctx, + ); + + if (role_tags.is_some()) { + let tags = role_tags.borrow().tags().keys(); + let mut i = 0; + let tag_count = tags.length(); + + while (i < tag_count) { + self.tags.increment_usage_count(&tags[i]); + i = i + 1; + }; + }; +} + +/// Updates permissions for an existing role. +public fun update_role_permissions( + self: &mut AuditTrail, + cap: &Capability, + role: String, + new_permissions: VecSet, + role_tags: Option, + clock: &Clock, + ctx: &mut TxContext, +) { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + + assert!(self.tags.contains_all_role_tags(&role_tags), ERecordTagNotDefined); + let old_record_tags = *role_map::get_role_data(self.access(), &role); + role_map::update_role( + self.access_mut(), + cap, + &role, + new_permissions, + copy role_tags, + clock, + ctx, + ); + + if (old_record_tags.is_some()) { + let tags = old_record_tags.borrow().tags().keys(); + let mut i = 0; + let tag_count = tags.length(); + + while (i < tag_count) { + self.tags.decrement_usage_count(&tags[i]); + i = i + 1; + }; + }; + + if (role_tags.is_some()) { + let tags = role_tags.borrow().tags().keys(); + let mut i = 0; + let tag_count = tags.length(); + + while (i < tag_count) { + self.tags.increment_usage_count(&tags[i]); + i = i + 1; + }; + }; +} + +/// Deletes an existing role. 
+public fun delete_role(
+    self: &mut AuditTrail,
+    cap: &Capability,
+    role: String,
+    clock: &Clock,
+    ctx: &mut TxContext,
+) {
+    assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch);
+    // Copy the role's tag data *before* the role is removed from the role map,
+    // so the registry usage counts can still be decremented afterwards.
+    let old_record_tags = *role_map::get_role_data(self.access(), &role);
+    role_map::delete_role(self.access_mut(), cap, &role, clock, ctx);
+
+    if (old_record_tags.is_some()) {
+        // Release the deleted role's allowlisted tags from the registry's
+        // combined usage counts (one decrement per tag the role referenced).
+        let tags = old_record_tags.borrow().tags().keys();
+        let mut i = 0;
+        let tag_count = tags.length();
+
+        while (i < tag_count) {
+            self.tags.decrement_usage_count(&tags[i]);
+            i = i + 1;
+        };
+    };
+}
+
+/// Issues a new capability for an existing role.
+///
+/// The capability object is transferred to `issued_to` if provided, otherwise to the caller.
+public fun new_capability(
+    self: &mut AuditTrail,
+    cap: &Capability,
+    role: String,
+    issued_to: Option,
+    valid_from: Option,
+    valid_until: Option,
+    clock: &Clock,
+    ctx: &mut TxContext,
+) {
+    assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch);
+
+    // Resolve the transfer recipient up front: `issued_to` is moved into
+    // `role_map::new_capability` below, so it cannot be inspected afterwards.
+    let recipient = if (issued_to.is_some()) {
+        let address_ref = issued_to.borrow();
+        *address_ref
+    } else {
+        ctx.sender()
+    };
+
+    let new_cap = role_map::new_capability(
+        self.access_mut(),
+        cap,
+        &role,
+        issued_to,
+        valid_from,
+        valid_until,
+        clock,
+        ctx,
+    );
+    transfer::public_transfer(new_cap, recipient);
+}
+
+/// Revokes an issued capability by ID.
+public fun revoke_capability(
+    self: &mut AuditTrail,
+    cap: &Capability,
+    cap_to_revoke: ID,
+    cap_to_revoke_valid_until: Option,
+    clock: &Clock,
+    ctx: &TxContext,
+) {
+    assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch);
+    role_map::revoke_capability(
+        self.access_mut(),
+        cap,
+        cap_to_revoke,
+        cap_to_revoke_valid_until,
+        clock,
+        ctx,
+    );
+}
+
+/// Destroys a capability object.
+///
+/// Requires a capability with `RevokeCapabilities` permission.
+public fun destroy_capability(
+    self: &mut AuditTrail,
+    cap: &Capability,
+    cap_to_destroy: Capability,
+    clock: &Clock,
+    ctx: &mut TxContext,
+) {
+    assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch);
+    // Authorization is checked explicitly here: destroying a capability is
+    // gated on the same permission as revoking one.
+    self
+        .roles
+        .assert_capability_valid(
+            cap,
+            &permission::revoke_capabilities(),
+            clock,
+            ctx,
+        );
+    role_map::destroy_capability(self.access_mut(), cap_to_destroy);
+}
+
+/// Destroys an initial admin capability.
+///
+/// Self-service: the owner passes in their own initial admin capability to destroy it.
+/// No additional authorization is required.
+///
+/// WARNING: If all initial admin capabilities are destroyed, the trail will be permanently
+/// sealed with no admin access possible.
+public fun destroy_initial_admin_capability(
+    self: &mut AuditTrail,
+    cap_to_destroy: Capability,
+) {
+    assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch);
+    role_map::destroy_initial_admin_capability(self.access_mut(), cap_to_destroy);
+}
+
+/// Revokes an initial admin capability by ID.
+///
+/// Requires a capability with `RevokeCapabilities` permission.
+///
+/// WARNING: If all initial admin capabilities are revoked, the trail will be permanently
+/// sealed with no admin access possible.
+public fun revoke_initial_admin_capability(
+    self: &mut AuditTrail,
+    cap: &Capability,
+    cap_to_revoke: ID,
+    cap_to_revoke_valid_until: Option,
+    clock: &Clock,
+    ctx: &mut TxContext,
+) {
+    assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch);
+    role_map::revoke_initial_admin_capability(
+        self.access_mut(),
+        cap,
+        cap_to_revoke,
+        cap_to_revoke_valid_until,
+        clock,
+        ctx,
+    );
+}
+
+/// Remove expired entries from the `revoked_capabilities` denylist.
+///
+/// Iterates through the revoked capabilities list and removes every entry whose
+/// `valid_until` timestamp is **non-zero** and **less than** the current clock time,
+/// because those capabilities are already naturally expired and no longer need to
+/// occupy space in the denylist.
+///
+/// Entries with `valid_until == 0` (i.e. capabilities that had no expiry) are kept,
+/// since they remain potentially valid and must stay on the denylist.
+///
+/// Parameters
+/// ----------
+/// - cap: Reference to the capability used to authorize this operation.
+///   Needs to grant the `CapabilityAdminPermissions::revoke` permission.
+/// - clock: Reference to a Clock instance for obtaining the current timestamp.
+/// - ctx: Reference to the transaction context.
+///
+/// Errors:
+/// - Aborts with any error documented by `assert_capability_valid` if the provided capability fails authorization checks.
+public fun cleanup_revoked_capabilities(
+    self: &mut AuditTrail,
+    cap: &Capability,
+    clock: &Clock,
+    ctx: &TxContext,
+) {
+    assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch);
+    // Authorization and the actual denylist pruning are delegated to the
+    // role map implementation.
+    self
+        .access_mut()
+        .cleanup_revoked_capabilities(
+            cap,
+            clock,
+            ctx,
+        );
+}
+
+// ===== Trail Query Functions =====
+
+/// Get the total number of records currently in the trail.
+/// Note: this may be lower than `sequence_number` once records have been deleted.
+public fun record_count(self: &AuditTrail): u64 {
+    linked_table::length(&self.records)
+}
+
+/// Get the next sequence number (monotonic counter, never decrements)
+public fun sequence_number(self: &AuditTrail): u64 {
+    self.sequence_number
+}
+
+/// Get the trail creator address
+public fun creator(self: &AuditTrail): address {
+    self.creator
+}
+
+/// Get the trail creation timestamp
+public fun created_at(self: &AuditTrail): u64 {
+    self.created_at
+}
+
+/// Get the trail's object ID
+public fun id(self: &AuditTrail): ID {
+    object::uid_to_inner(&self.id)
+}
+
+/// Get the trail name, if immutable metadata was set
+public fun name(self: &AuditTrail): Option {
+    self.immutable_metadata.map!(|metadata| metadata.name)
+}
+
+/// Get the trail description, if immutable metadata was set.
+/// Same contract as `name`, written without the `map!` macro because the
+/// `description` field is itself optional.
+public fun description(self: &AuditTrail): Option {
+    if (self.immutable_metadata.is_some()) {
+        option::borrow(&self.immutable_metadata).description
+    } else {
+        option::none()
+    }
+}
+
+/// Get the updatable metadata
+public fun metadata(self: &AuditTrail): &Option {
+    &self.updatable_metadata
+}
+
+/// Get the locking configuration
+public fun locking_config(self: &AuditTrail): &LockingConfig {
+    &self.locking_config
+}
+
+/// Get the trail-defined record tags and their combined usage counts.
+public fun tags(self: &AuditTrail): &TagRegistry { + &self.tags +} + +/// Check if the trail is empty +public fun is_empty(self: &AuditTrail): bool { + linked_table::is_empty(&self.records) +} + +/// Get the first sequence number +public fun first_sequence(self: &AuditTrail): Option { + *linked_table::front(&self.records) +} + +/// Get the last sequence number +public fun last_sequence(self: &AuditTrail): Option { + *linked_table::back(&self.records) +} + +// ===== Record Query Functions ===== + +/// Get a record by sequence number +public fun get_record(self: &AuditTrail, sequence_number: u64): &Record { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + assert!(linked_table::contains(&self.records, sequence_number), ERecordNotFound); + linked_table::borrow(&self.records, sequence_number) +} + +/// Check if a record exists at the given sequence number +public fun has_record(self: &AuditTrail, sequence_number: u64): bool { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + linked_table::contains(&self.records, sequence_number) +} + +/// Returns all records of the audit trail +public fun records(self: &AuditTrail): &LinkedTable> { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + &self.records +} +// ===== Access Control Functions ===== + +/// Returns a reference to the RoleMap managing access (roles and capabilities) for the audit trail. +public fun access(self: &AuditTrail): &RoleMap { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + &self.roles +} + +/// Returns a mutable reference to the RoleMap managing access (roles and capabilities) for the audit trail. 
+public(package) fun access_mut( + self: &mut AuditTrail, +): &mut RoleMap { + assert!(self.version == PACKAGE_VERSION, EPackageVersionMismatch); + &mut self.roles +} diff --git a/audit-trail-move/sources/locking.move b/audit-trail-move/sources/locking.move new file mode 100644 index 00000000..0ff838f5 --- /dev/null +++ b/audit-trail-move/sources/locking.move @@ -0,0 +1,202 @@ +// Copyright (c) 2025 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/// Locking configuration for audit trail records +module audit_trail::locking; + +use iota::clock::Clock; +use tf_components::timelock::{Self, TimeLock}; + +// ===== Errors ===== + +/// UntilDestroyed cannot be used for trail deletion protection. +const EUntilDestroyedNotSupportedForDeleteTrail: u64 = 0; + +/// Defines a locking window (time XOR count based, or none) +public enum LockingWindow has copy, drop, store { + None, + TimeBased { seconds: u64 }, + CountBased { count: u64 }, +} + +/// Top-level locking configuration for the audit trail +public struct LockingConfig has drop, store { + /// Locking rules for record deletion + delete_record_window: LockingWindow, + /// Timelock protecting deletion of the trail itself + delete_trail_lock: TimeLock, + /// Timelock protecting record writes (add_record) + write_lock: TimeLock, +} + +// ===== LockingWindow Constructors ===== + +/// Create a locking window with no restrictions +public fun window_none(): LockingWindow { + LockingWindow::None +} + +/// Create a time-based locking window +public fun window_time_based(seconds: u64): LockingWindow { + LockingWindow::TimeBased { seconds } +} + +/// Create a count-based locking window +public fun window_count_based(count: u64): LockingWindow { + LockingWindow::CountBased { count } +} + +// ===== LockingConfig Constructors ===== + +/// Create a new locking configuration +public fun new( + delete_record_window: LockingWindow, + delete_trail_lock: TimeLock, + write_lock: TimeLock, +): LockingConfig { + assert!( + 
!timelock::is_until_destroyed(&delete_trail_lock), + EUntilDestroyedNotSupportedForDeleteTrail, + ); + + LockingConfig { + delete_record_window, + delete_trail_lock, + write_lock, + } +} + +// ===== LockingWindow Getters ===== + +/// Get the time window in seconds (if set) +public(package) fun time_window_seconds(window: &LockingWindow): Option { + match (window) { + LockingWindow::TimeBased { seconds } => option::some(*seconds), + _ => option::none(), + } +} + +/// Get the count window (if set) +public(package) fun count_window(window: &LockingWindow): Option { + match (window) { + LockingWindow::CountBased { count } => option::some(*count), + _ => option::none(), + } +} + +// ===== LockingConfig Getters ===== + +/// Get the record deletion locking window +public(package) fun delete_record_window(config: &LockingConfig): &LockingWindow { + &config.delete_record_window +} + +/// Get the trail deletion timelock +public(package) fun delete_trail_lock(config: &LockingConfig): &TimeLock { + &config.delete_trail_lock +} + +/// Get the write timelock +public(package) fun write_lock(config: &LockingConfig): &TimeLock { + &config.write_lock +} + +// ===== LockingConfig Setters ===== + +/// Set the record deletion locking window +public(package) fun set_delete_record_window(config: &mut LockingConfig, window: LockingWindow) { + config.delete_record_window = window; +} + +/// Set the trail deletion timelock. +public(package) fun set_delete_trail_lock(config: &mut LockingConfig, lock: TimeLock) { + assert!(!timelock::is_until_destroyed(&lock), EUntilDestroyedNotSupportedForDeleteTrail); + + config.delete_trail_lock = lock; +} + +/// Set the write timelock. +public(package) fun set_write_lock(config: &mut LockingConfig, lock: TimeLock) { + config.write_lock = lock; +} + +/// Set the whole locking configuration. 
+public(package) fun set_config(config: &mut LockingConfig, new_config: LockingConfig) {
+    // Unpack the replacement config and route each part through its
+    // individual setter so the `UntilDestroyed` guard on the trail-deletion
+    // lock (see `set_delete_trail_lock`) is re-applied.
+    let LockingConfig {
+        delete_record_window,
+        delete_trail_lock,
+        write_lock,
+    } = new_config;
+
+    set_delete_record_window(config, delete_record_window);
+    set_delete_trail_lock(config, delete_trail_lock);
+    set_write_lock(config, write_lock);
+}
+
+// ===== Locking Logic (LockingWindow) =====
+
+/// Check if a record is locked based on time window.
+/// Returns true if the record was created within the time window.
+/// Always false for `None` and `CountBased` windows.
+fun is_time_locked(window: &LockingWindow, record_timestamp: u64, current_time: u64): bool {
+    match (window) {
+        LockingWindow::TimeBased { seconds } => {
+            // Window is configured in seconds; timestamps are in milliseconds.
+            let time_window_ms = (*seconds) * 1000;
+            // NOTE(review): aborts on u64 underflow if `record_timestamp` is
+            // in the future relative to `current_time` — confirm callers
+            // always pass clock-ordered values.
+            let record_age = current_time - record_timestamp;
+            record_age < time_window_ms
+        },
+        _ => false,
+    }
+}
+
+/// Check if a record is locked based on count window.
+/// Returns true if the record is among the last N records.
+/// Always false for `None` and `TimeBased` windows.
+fun is_count_locked(window: &LockingWindow, sequence_number: u64, total_records: u64): bool {
+    match (window) {
+        LockingWindow::CountBased { count } => {
+            // NOTE(review): assumes `sequence_number < total_records`; if
+            // records have been deleted, `total_records` can be smaller than
+            // a surviving sequence number and this subtraction would abort on
+            // underflow — confirm callers pass consistent values.
+            let records_after = total_records - sequence_number - 1;
+            records_after < *count
+        },
+        _ => false,
+    }
+}
+
+/// Check if a record is locked by a window (either by time or count).
+fun is_window_locked(
+    window: &LockingWindow,
+    sequence_number: u64,
+    record_timestamp: u64,
+    total_records: u64,
+    current_time: u64,
+): bool {
+    is_time_locked(window, record_timestamp, current_time)
+        || is_count_locked(window, sequence_number, total_records)
+}
+
+// ===== Locking Logic (LockingConfig) =====
+
+/// Check if a record is locked for deletion.
+public fun is_delete_record_locked(
+    config: &LockingConfig,
+    sequence_number: u64,
+    record_timestamp: u64,
+    total_records: u64,
+    current_time: u64,
+): bool {
+    is_window_locked(
+        &config.delete_record_window,
+        sequence_number,
+        record_timestamp,
+        total_records,
+        current_time,
+    )
+}
+
+/// Check if trail deletion is currently locked.
+public fun is_delete_trail_locked(config: &LockingConfig, clock: &Clock): bool { + timelock::is_timelocked(delete_trail_lock(config), clock) +} + +/// Check if writes are currently locked. +public fun is_write_locked(config: &LockingConfig, clock: &Clock): bool { + timelock::is_timelocked(write_lock(config), clock) +} diff --git a/audit-trail-move/sources/permission.move b/audit-trail-move/sources/permission.move new file mode 100644 index 00000000..b16a984b --- /dev/null +++ b/audit-trail-move/sources/permission.move @@ -0,0 +1,247 @@ +// Copyright (c) 2025 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/// Permission system for role-based access control +module audit_trail::permission; + +use iota::vec_set::{Self, VecSet}; + +/// Existing permissions for the Audit Trail object +public enum Permission has copy, drop, store { + // --- Whole Audit Trail related - Proposed role: `Admin` --- + /// Destroy the whole Audit Trail object + DeleteAuditTrail, + /// Delete records in batches for cleanup workflows + DeleteAllRecords, + // --- Record Management - Proposed role: `RecordAdmin` --- + /// Add records to the trail + AddRecord, + /// Delete records from the trail + DeleteRecord, + /// Correct existing records in the trail + CorrectRecord, + // --- Locking Config - Proposed role: `LockingAdmin` --- + /// Update the whole locking configuration + UpdateLockingConfig, + /// Update the delete_record_lock configuration which is part of the locking configuration + UpdateLockingConfigForDeleteRecord, + /// Update the delete_lock configuration for the whole Audit Trail + UpdateLockingConfigForDeleteTrail, + /// Update the write_lock configuration for the whole Audit Trail + UpdateLockingConfigForWrite, + // --- Role Management - Proposed role: `RoleAdmin` --- + /// Add new roles with associated permissions + AddRoles, + /// Update permissions associated with existing roles + UpdateRoles, + /// Delete existing roles + DeleteRoles, + // --- Capability Management - 
Proposed role: `CapAdmin` --- + /// Issue new capabilities + AddCapabilities, + /// Revoke existing capabilities + RevokeCapabilities, + // --- Meta Data related - Proposed role: `MetadataAdmin` --- + /// Update the updatable metadata field + UpdateMetadata, + /// Delete the updatable metadata field + DeleteMetadata, + /// Migrate the audit trail to a new version of the contract + Migrate, + // --- Record Tag Management - Proposed role: `TagAdmin` --- + /// Add new record tags to the trail registry + AddRecordTags, + /// Remove record tags from the trail registry + DeleteRecordTags, +} + +/// Create an empty permission set +public fun empty(): VecSet { + vec_set::empty() +} + +/// Add a permission to a set +public fun add(set: &mut VecSet, perm: Permission) { + vec_set::insert(set, perm); +} + +/// Create a permission set from a vector +public fun from_vec(perms: vector): VecSet { + let mut set = vec_set::empty(); + let mut i = 0; + let len = perms.length(); + while (i < len) { + vec_set::insert(&mut set, perms[i]); + i = i + 1; + }; + set +} + +/// Check if a set contains a specific permission +public fun has_permission(set: &VecSet, perm: &Permission): bool { + vec_set::contains(set, perm) +} + +// ------Functions creating permission sets for often used roles --------- + +/// Create permissions typically used for the `Admin` role +public fun admin_permissions(): VecSet { + let mut perms = vec_set::empty(); + perms.insert(add_capabilities()); + perms.insert(revoke_capabilities()); + perms.insert(add_record_tags()); + perms.insert(delete_record_tags()); + perms.insert(add_roles()); + perms.insert(update_roles()); + perms.insert(delete_roles()); + perms +} + +/// Create permissions typical used for the `RecordAdmin` role +public fun record_admin_permissions(): VecSet { + let mut perms = vec_set::empty(); + perms.insert(add_record()); + perms.insert(delete_record()); + perms.insert(correct_record()); + perms +} + +/// Create permissions typical used for the 
`LockingAdmin` role +public fun locking_admin_permissions(): VecSet { + let mut perms = vec_set::empty(); + perms.insert(update_locking_config()); + perms.insert(update_locking_config_for_delete_trail()); + perms.insert(update_locking_config_for_delete_record()); + perms.insert(update_locking_config_for_write()); + perms +} + +/// Create permissions typical used for the `RoleAdmin` role +public fun role_admin_permissions(): VecSet { + let mut perms = vec_set::empty(); + perms.insert(add_roles()); + perms.insert(update_roles()); + perms.insert(delete_roles()); + perms +} + +/// Create permissions typically used for the `TagAdmin` role +public fun tag_admin_permissions(): VecSet { + let mut perms = vec_set::empty(); + perms.insert(add_record_tags()); + perms.insert(delete_record_tags()); + perms +} + +/// Create permissions typical used for the `CapAdmin` role +public fun cap_admin_permissions(): VecSet { + let mut perms = vec_set::empty(); + perms.insert(add_capabilities()); + perms.insert(revoke_capabilities()); + perms +} + +/// Create permissions typical used for the `MetadataAdmin` role +public fun metadata_admin_permissions(): VecSet { + let mut perms = vec_set::empty(); + perms.insert(update_metadata()); + perms.insert(delete_metadata()); + perms +} + +// ------- Constructor functions for all Permission variants ------------- + +/// Returns a permission allowing to destroy the whole Audit Trail object +public fun delete_audit_trail(): Permission { + Permission::DeleteAuditTrail +} + +/// Returns a permission allowing to delete records in batches +public fun delete_all_records(): Permission { + Permission::DeleteAllRecords +} + +/// Returns a permission allowing to add records to the trail +public fun add_record(): Permission { + Permission::AddRecord +} + +/// Returns a permission allowing to delete records from the trail +public fun delete_record(): Permission { + Permission::DeleteRecord +} + +/// Returns a permission allowing to correct existing records in 
the trail +public fun correct_record(): Permission { + Permission::CorrectRecord +} + +/// Returns a permission allowing to update the whole locking configuration +public fun update_locking_config(): Permission { + Permission::UpdateLockingConfig +} + +/// Returns a permission allowing to update the delete_lock configuration for records +public fun update_locking_config_for_delete_record(): Permission { + Permission::UpdateLockingConfigForDeleteRecord +} + +/// Returns a permission allowing to update the delete_lock configuration for the whole Audit Trail +public fun update_locking_config_for_delete_trail(): Permission { + Permission::UpdateLockingConfigForDeleteTrail +} + +/// Returns a permission allowing to update the write_lock configuration for the whole Audit Trail +public fun update_locking_config_for_write(): Permission { + Permission::UpdateLockingConfigForWrite +} + +/// Returns a permission allowing to add new record tags to the trail registry +public fun add_record_tags(): Permission { + Permission::AddRecordTags +} + +/// Returns a permission allowing to remove record tags from the trail registry +public fun delete_record_tags(): Permission { + Permission::DeleteRecordTags +} + +/// Returns a permission allowing to add new roles with associated permissions +public fun add_roles(): Permission { + Permission::AddRoles +} + +/// Returns a permission allowing to update permissions associated with existing roles +public fun update_roles(): Permission { + Permission::UpdateRoles +} + +/// Returns a permission allowing to delete existing roles +public fun delete_roles(): Permission { + Permission::DeleteRoles +} + +/// Returns a permission allowing to issue new capabilities +public fun add_capabilities(): Permission { + Permission::AddCapabilities +} + +/// Returns a permission allowing to revoke existing capabilities +public fun revoke_capabilities(): Permission { + Permission::RevokeCapabilities +} + +/// Returns a permission allowing to update the 
updatable_metadata field +public fun update_metadata(): Permission { + Permission::UpdateMetadata +} + +/// Returns a permission allowing to delete the updatable_metadata field +public fun delete_metadata(): Permission { + Permission::DeleteMetadata +} + +/// Returns a permission allowing to migrate the audit trail to a new version of the contract +public fun migrate_audit_trail(): Permission { + Permission::Migrate +} diff --git a/audit-trail-move/sources/record.move b/audit-trail-move/sources/record.move new file mode 100644 index 00000000..4a5c6985 --- /dev/null +++ b/audit-trail-move/sources/record.move @@ -0,0 +1,226 @@ +// Copyright (c) 2025 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/// Record module for audit trail entries +/// +/// A Record represents a single entry in an audit trail, stored in a +/// LinkedTable and addressed by trail_id + sequence_number. +module audit_trail::record; + +use iota::vec_set::{Self, VecSet}; +use std::string::String; + +/// Flexible record payload that can store either raw bytes or text. +public enum Data has copy, drop, store { + Bytes(vector), + Text(String), +} + +/// Creates a bytes payload. +public fun new_bytes(bytes: vector): Data { + Data::Bytes(bytes) +} + +/// Creates a text payload. +public fun new_text(text: String): Data { + Data::Text(text) +} + +/// Returns the bytes payload when present. +public fun bytes(data: &Data): Option> { + match (data) { + Data::Bytes(bytes) => option::some(*bytes), + Data::Text(_) => option::none(), + } +} + +/// Returns the text payload when present. 
+public fun text(data: &Data): Option {
+    match (data) {
+        Data::Bytes(_) => option::none(),
+        // Returns an owned copy of the stored text.
+        Data::Text(text) => option::some(*text),
+    }
+}
+
+/// A single record in the audit trail.
+/// `has store` only (no `drop`): records must be torn down explicitly via `destroy`.
+public struct Record has store {
+    /// Arbitrary data stored on-chain
+    data: D,
+    /// Optional metadata for this specific record
+    metadata: Option,
+    /// Optional immutable tag associated with this record
+    tag: Option,
+    /// Position in the trail (0-indexed, never reused)
+    sequence_number: u64,
+    /// Who added this record
+    added_by: address,
+    /// When this record was added (milliseconds)
+    added_at: u64,
+    /// Correction tracker for this record
+    correction: RecordCorrection,
+}
+
+/// Input used when creating a trail with an initial record.
+/// Unlike `Record`, this is freely copyable/droppable because it carries no
+/// trail-assigned bookkeeping yet (no sequence number, author, or timestamp).
+public struct InitialRecord has copy, drop, store {
+    data: D,
+    metadata: Option,
+    tag: Option,
+}
+
+// ===== Constructors =====
+
+/// Create a new initial-record input.
+public fun new_initial_record(
+    data: D,
+    metadata: Option,
+    tag: Option,
+): InitialRecord {
+    InitialRecord { data, metadata, tag }
+}
+
+/// Create a new record. Package-internal: sequence numbers, author and
+/// timestamp are assigned by the trail, never by external callers.
+public(package) fun new(
+    data: D,
+    metadata: Option,
+    tag: Option,
+    sequence_number: u64,
+    added_by: address,
+    added_at: u64,
+    correction: RecordCorrection,
+): Record {
+    Record {
+        data,
+        metadata,
+        tag,
+        sequence_number,
+        added_by,
+        added_at,
+        correction,
+    }
+}
+
+/// Convert an initial-record input into a stored record.
+public(package) fun into_record( + initial_record: InitialRecord, + sequence_number: u64, + added_by: address, + added_at: u64, +): Record { + let InitialRecord { data, metadata, tag } = initial_record; + new( + data, + metadata, + tag, + sequence_number, + added_by, + added_at, + empty(), + ) +} + +// ===== Getters ===== + +/// Get the stored data from a record +public fun data(self: &Record): &D { + &self.data +} + +/// Get the record metadata +public fun metadata(self: &Record): &Option { + &self.metadata +} + +/// Get the optional record tag +public fun tag(record: &Record): &Option { + &record.tag +} + +/// Get the record sequence number +public fun sequence_number(self: &Record): u64 { + self.sequence_number +} + +/// Get who added the record +public fun added_by(self: &Record): address { + self.added_by +} + +/// Get when the record was added (milliseconds) +public fun added_at(self: &Record): u64 { + self.added_at +} + +/// Get the correction tracker for this record +public fun correction(self: &Record): &RecordCorrection { + &self.correction +} + +/// Destroy a record +public(package) fun destroy(self: Record) { + let Record { + data: _, + metadata: _, + tag: _, + sequence_number: _, + added_by: _, + added_at: _, + correction: _, + } = self; +} + +/// Bidirectional correction tracking for audit records +public struct RecordCorrection has copy, drop, store { + replaces: VecSet, + is_replaced_by: Option, +} + +/// Create a new correction tracker for a normal (non-correcting) record +public fun empty(): RecordCorrection { + RecordCorrection { + replaces: vec_set::empty(), + is_replaced_by: option::none(), + } +} + +/// Create a correction tracker for a correcting record +public fun with_replaces(replaced_seq_nums: VecSet): RecordCorrection { + RecordCorrection { + replaces: replaced_seq_nums, + is_replaced_by: option::none(), + } +} + +/// Get the set of sequence numbers this record replaces +public fun replaces(correction: &RecordCorrection): &VecSet { + 
&correction.replaces +} + +/// Get the sequence number of the record that replaced this one +public fun is_replaced_by(correction: &RecordCorrection): Option { + correction.is_replaced_by +} + +/// Check if this record is a correction (replaces other records) +public fun is_correction(correction: &RecordCorrection): bool { + !vec_set::is_empty(&correction.replaces) +} + +/// Check if this record has been replaced by another record +public fun is_replaced(correction: &RecordCorrection): bool { + correction.is_replaced_by.is_some() +} + +/// Set the sequence number of the record that replaced this one +public(package) fun set_replaced_by(correction: &mut RecordCorrection, replacement_seq: u64) { + correction.is_replaced_by = option::some(replacement_seq); +} + +/// Add a sequence number to the set of records this record replaces +public(package) fun add_replaces(correction: &mut RecordCorrection, seq_num: u64) { + correction.replaces.insert(seq_num); +} + +/// Destroy a RecordCorrection +public(package) fun destroy_record_correction(correction: RecordCorrection) { + let RecordCorrection { replaces: _, is_replaced_by: _ } = correction; +} diff --git a/audit-trail-move/sources/record_tags.move b/audit-trail-move/sources/record_tags.move new file mode 100644 index 00000000..c319660b --- /dev/null +++ b/audit-trail-move/sources/record_tags.move @@ -0,0 +1,154 @@ +// Copyright (c) 2025 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/// Record tag types and helper predicates for audit trails. +module audit_trail::record_tags; + +use audit_trail::permission::Permission; +use iota::{vec_map::{Self, VecMap}, vec_set::{Self, VecSet}}; +use std::string::String; +use tf_components::{capability::Capability, role_map::{Self, RoleMap}}; + +// ----------- RoleTags ------- + +/// Stores all record tag related data associated with a role in the RoleMap. +/// Contains a list of allowlisted tags for the role. 
+public struct RoleTags has copy, drop, store {
+    tags: VecSet,
+}
+
+/// Create a new `RoleTags`.
+public fun new_role_tags(tags: vector): RoleTags {
+    RoleTags {
+        tags: vec_set::from_keys(tags),
+    }
+}
+
+/// Get the allowlisted record tags for a role from a `RoleTags`.
+public fun tags(self: &RoleTags): &VecSet {
+    &self.tags
+}
+
+// ----------- TagRegistry -------
+
+/// A registry of tags available for use on an audit trail, along with usage counts
+/// to track how many records and roles are currently using each tag.
+/// Usage counts for roles and tags are summed and build a combined usage count.
+public struct TagRegistry has copy, drop, store {
+    tag_map: VecMap,
+}
+
+/// Get a mapping of record tag names to their combined usage counts.
+public fun tag_map(self: &TagRegistry): &VecMap {
+    &self.tag_map
+}
+
+/// Create a `TagRegistry` with zeroed usage counts to manage a list of available tags to be
+/// associated with records and roles on an audit trail.
+/// NOTE: duplicate tag names will abort inside `vec_map::insert`.
+public(package) fun new_tag_registry(mut tags: vector): TagRegistry {
+    let mut usage = vec_map::empty();
+    // Reverse first so repeated `pop_back` inserts the tags in the caller's
+    // original order.
+    tags.reverse();
+
+    while (tags.length() != 0) {
+        vec_map::insert(&mut usage, tags.pop_back(), 0);
+    };
+
+    TagRegistry { tag_map: usage }
+}
+
+/// Destroys the `TagRegistry` by emptying the internal tag map and then destroying it.
+/// NOTE(review): `TagRegistry` already has the `drop` ability, so this explicit
+/// teardown appears redundant — confirm whether it exists intentionally.
+public(package) fun destroy(mut self: TagRegistry) {
+    while (!self.tag_map.is_empty()) {
+        let (_, _) = self.tag_map.pop();
+    };
+    self.tag_map.destroy_empty();
+}
+
+/// Insert a tag with an explicit starting usage count.
+public(package) fun insert_tag(self: &mut TagRegistry, tag: String, usage_count: u64) {
+    self.tag_map.insert(tag, usage_count);
+}
+
+/// Remove a tag from the registry (aborts if the tag is absent).
+public(package) fun remove_tag(self: &mut TagRegistry, tag: &String) {
+    self.tag_map.remove(tag);
+}
+
+/// Returns all tag names currently in the registry.
+public(package) fun tag_keys(self: &TagRegistry): vector {
+    iota::vec_map::keys(&self.tag_map)
+}
+
+/// Returns true when all provided `role_tags` (tags associated with a role) are contained in the `TagRegistry`.
+public(package) fun contains_all_role_tags(self: &TagRegistry, role_tags: &Option): bool {
+    // A role without tag data imposes no tag requirements.
+    if (!role_tags.is_some()) {
+        return true
+    };
+
+    let tags = &option::borrow(role_tags).tags;
+    let allowed_tag_keys = iota::vec_set::keys(tags);
+    let mut i = 0;
+    let tag_count = allowed_tag_keys.length();
+
+    while (i < tag_count) {
+        if (!iota::vec_map::contains(&self.tag_map, &allowed_tag_keys[i])) {
+            return false
+        };
+        i = i + 1;
+    };
+
+    true
+}
+
+/// Returns true when the specified tag is contained in the `TagRegistry`.
+public(package) fun contains(self: &TagRegistry, tag: &String): bool {
+    iota::vec_map::contains(&self.tag_map, tag)
+}
+
+/// Returns the current combined usage count (sum of role and record usages) for a tag.
+/// Returns `Option::none()` if the tag is not contained in the registry.
+public(package) fun usage_count(self: &TagRegistry, tag: &String): Option {
+    if (self.tag_map.contains(tag)) {
+        option::some(*self.tag_map.get(tag))
+    } else {
+        option::none()
+    }
+}
+
+/// Increments the usage count for a tag by 1.
+/// Has no effect if the tag is not contained in the registry.
+public(package) fun increment_usage_count(self: &mut TagRegistry, tag: &String) {
+    if (self.tag_map.contains(tag)) {
+        let counters = vec_map::get_mut(&mut self.tag_map, tag);
+        *counters = *counters + 1;
+    };
+}
+
+/// Decrements the usage count for a tag by 1.
+/// Has no effect if the tag is not contained in the registry.
+/// NOTE(review): aborts on u64 underflow if the count is already 0 — callers
+/// must keep increments and decrements balanced.
+public(package) fun decrement_usage_count(self: &mut TagRegistry, tag: &String) {
+    if (self.tag_map.contains(tag)) {
+        let counters = vec_map::get_mut(&mut self.tag_map, tag);
+        *counters = *counters - 1;
+    };
+}
+
+/// Returns whether the specified tag is in use (combined usage count > 0).
+/// Returns false if the tag is not contained in the registry.
+public(package) fun is_in_use(self: &TagRegistry, tag: &String): bool { + (*self.usage_count(tag).borrow_with_default(&0)) > 0 +} + +// ----------- RoleMap related ------- + +/// Returns true when the capability's role data allows the requested tag. +public(package) fun role_allows( + roles: &RoleMap, + cap: &Capability, + tag: &String, +): bool { + let role_tags = role_map::get_role_data(roles, cap.role()); + if (!role_tags.is_some()) { + return false + }; + + let tags = &option::borrow(role_tags).tags; + iota::vec_set::contains(tags, tag) +} diff --git a/audit-trail-move/tests/capability_tests.move b/audit-trail-move/tests/capability_tests.move new file mode 100644 index 00000000..6aaad721 --- /dev/null +++ b/audit-trail-move/tests/capability_tests.move @@ -0,0 +1,1380 @@ +#[allow(lint(abort_without_constant))] +#[test_only] +module audit_trail::capability_tests; + +use audit_trail::{ + locking, + main::AuditTrail, + permission, + record::{Self, Data}, + test_utils::{ + Self, + setup_test_audit_trail, + fetch_capability_trail_and_clock, + cleanup_capability_trail_and_clock + } +}; +use iota::test_scenario::{Self as ts, Scenario}; +use std::string; +use tf_components::{capability::Capability, timelock}; + +/// Helper function to setup an audit trail with a RecordAdmin role and a capability +/// with a time window restriction transferred to the record_user. +/// Returns the trail_id. 
+fun setup_trail_with_record_admin_capability_and_time_window_restriction( + scenario: &mut Scenario, + admin_user: address, + record_user: address, + valid_from_ms: u64, + valid_until_ms: u64, +): ID { + // Setup + let trail_id = setup_trail_with_record_admin_role(scenario, admin_user); + + // Issue capability with time window + ts::next_tx(scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(scenario); + + let cap = trail + .access_mut() + .new_capability( + &admin_cap, + &string::utf8(b"RecordAdmin"), + std::option::none(), // no address restriction + std::option::some(valid_from_ms), + std::option::some(valid_until_ms), + &clock, + ts::ctx(scenario), + ); + + // Verify capability properties + assert!(cap.issued_to().is_none(), 0); + assert!(cap.valid_from() == std::option::some(valid_from_ms), 1); + assert!(cap.valid_until() == std::option::some(valid_until_ms), 2); + + transfer::public_transfer(cap, record_user); + cleanup_capability_trail_and_clock(scenario, admin_cap, trail, clock); + }; + + trail_id +} + +/// Helper function to setup an audit trail with a RecordAdmin role. +/// Returns the trail_id. 
+fun setup_trail_with_record_admin_role(scenario: &mut Scenario, admin_user: address): ID { + // Setup: Create audit trail with admin capability + let trail_id = { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + + let (admin_cap, trail_id) = setup_test_audit_trail( + scenario, + locking_config, + std::option::none(), + ); + + transfer::public_transfer(admin_cap, admin_user); + trail_id + }; + + // Create a custom role for testing + ts::next_tx(scenario, admin_user); + { + let admin_cap = ts::take_from_sender(scenario); + let mut trail = ts::take_shared>(scenario); + let clock = iota::clock::create_for_testing(ts::ctx(scenario)); + + let record_admin_perms = permission::record_admin_permissions(); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + record_admin_perms, + std::option::none(), + &clock, + ts::ctx(scenario), + ); + + iota::clock::destroy_for_testing(clock); + ts::return_to_sender(scenario, admin_cap); + ts::return_shared(trail); + }; + + trail_id +} + +#[test] +fun test_new_capability() { + let admin_user = @0xAD; + let user1 = @0xB0B; + let user2 = @0xCAB; + + let mut scenario = ts::begin(admin_user); + + let trail_id = { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, trail_id) = setup_test_audit_trail( + &mut scenario, + locking_config, + option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + trail_id + }; + + // Create a role to issue capabilities for + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, 
clock); + }; + + // Issue first capability + ts::next_tx(&mut scenario, admin_user); + let cap1_id = { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let cap1 = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + assert!(cap1.role() == string::utf8(b"RecordAdmin"), 1); + assert!(cap1.target_key() == trail_id, 2); + + let cap1_id = object::id(&cap1); + + transfer::public_transfer(cap1, user1); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + + cap1_id + }; + + // Issue second capability and verify both have unique IDs + ts::next_tx(&mut scenario, admin_user); + let _cap2_id = { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let cap2 = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + let cap2_id = object::id(&cap2); + + // Verify capabilities have unique IDs + assert!(cap1_id != cap2_id, 3); + + transfer::public_transfer(cap2, user2); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + + cap2_id + }; + + ts::end(scenario); +} + +#[test] +fun test_revoke_capability() { + let admin_user = @0xAD; + let user1 = @0xB0B; + let user2 = @0xCAB; + + let mut scenario = ts::begin(admin_user); + + let _trail_id = setup_trail_with_record_admin_role(&mut scenario, admin_user); + + // Issue two capabilities + ts::next_tx(&mut scenario, admin_user); + let (cap1_id, cap2_id) = { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let cap1 = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + let cap1_id = object::id(&cap1); + transfer::public_transfer(cap1, user1); + + let cap2 = 
test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + let cap2_id = object::id(&cap2); + transfer::public_transfer(cap2, user2); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + + (cap1_id, cap2_id) + }; + + // Test: Revoke first capability and verify it's tracked in the deny list + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + let cap1 = ts::take_from_address(&scenario, user1); + + // Verify the deny list is empty before revocation + let cap_count_before = trail.access().revoked_capabilities().length(); + assert!(cap_count_before == 0, 0); + + // Revoke the capability + trail + .access_mut() + .revoke_capability( + &admin_cap, + cap1.id(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify capability has been added to the deny list + assert!(trail.access().revoked_capabilities().length() == cap_count_before + 1, 1); + assert!(trail.access().revoked_capabilities().contains(cap1_id), 2); + + ts::return_to_address(user1, cap1); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Verify revoked capability object still exists (just invalidated) + ts::next_tx(&mut scenario, user1); + { + assert!(ts::has_most_recent_for_sender(&scenario), 3); + }; + + // Test: Revoke second capability + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + let cap2 = ts::take_from_address(&scenario, user2); + + let cap_count_before = trail.access().revoked_capabilities().length(); + assert!(cap_count_before == 1, 4); // only the first revoked capability (cap1) should be in the list + + trail + .access_mut() + .revoke_capability( + &admin_cap, + cap2.id(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify 
capability has been added to the deny list + assert!(trail.access().revoked_capabilities().length() == cap_count_before + 1, 5); + assert!(trail.access().revoked_capabilities().contains(cap2_id), 6); + + ts::return_to_address(user2, cap2); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_destroy_capability() { + let admin_user = @0xAD; + let user1 = @0xB0B; + let user2 = @0xCAB; + + let mut scenario = ts::begin(admin_user); + + let _trail_id = setup_trail_with_record_admin_role(&mut scenario, admin_user); + + // Issue two capabilities + ts::next_tx(&mut scenario, admin_user); + let (_cap1_id, _cap2_id) = { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let cap1 = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + let cap1_id = object::id(&cap1); + transfer::public_transfer(cap1, user1); + + let cap2 = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + let cap2_id = object::id(&cap2); + transfer::public_transfer(cap2, user2); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + + (cap1_id, cap2_id) + }; + + // User1 destroys their capability + ts::next_tx(&mut scenario, user1); + { + let mut trail = ts::take_shared>(&scenario); + let cap1 = ts::take_from_sender(&scenario); + + // Destroy the capability + trail.access_mut().destroy_capability(cap1); + + ts::return_shared(trail); + }; + + // Verify destroyed capability no longer exists + ts::next_tx(&mut scenario, user1); + { + assert!(!ts::has_most_recent_for_sender(&scenario), 0); + }; + + // Test: User2 destroys their own capability + ts::next_tx(&mut scenario, user2); + { + let mut trail = ts::take_shared>(&scenario); + let cap2 = 
ts::take_from_sender(&scenario); + + trail.access_mut().destroy_capability(cap2); + + ts::return_shared(trail); + }; + + // Verify destroyed capability no longer exists + ts::next_tx(&mut scenario, user2); + { + assert!(!ts::has_most_recent_for_sender(&scenario), 1); + }; + + ts::end(scenario); +} + +/// Test capability lifecycle: creation, usage, and destruction in a complete workflow. +/// +/// This test validates: +/// - Multiple capabilities can be created for different roles +/// - Capabilities can be used to perform authorized actions +/// - Capabilities can be revoked or destroyed +/// - revoked_capabilities tracking remains accurate throughout the lifecycle +#[test] +fun test_capability_lifecycle() { + let admin_user = @0xAD; + let record_admin_user = @0xB0B; + let role_admin_user = @0xCAB; + + let mut scenario = ts::begin(admin_user); + + // Setup: Create audit trail + let _trail_id = setup_trail_with_record_admin_role(&mut scenario, admin_user); + + // Create an additional RoleAdmin role + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RoleAdmin"), + permission::role_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Issue capabilities + ts::next_tx(&mut scenario, admin_user); + let (_record_cap_id, role_cap_id) = { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + let record_cap_id = object::id(&record_cap); + transfer::public_transfer(record_cap, record_admin_user); + + let role_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + 
&admin_cap, + &string::utf8(b"RoleAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + let role_cap_id = object::id(&role_cap); + transfer::public_transfer(role_cap, role_admin_user); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + + (record_cap_id, role_cap_id) + }; + + // Use RecordAdmin capability to add a record + ts::next_tx(&mut scenario, record_admin_user); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + + clock.set_for_testing(test_utils::initial_time_for_testing() + 1000); + + let test_data = record::new_text(string::utf8(b"Test record")); + trail.add_record( + &record_cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + // RecordAdmin destroys their capability + ts::next_tx(&mut scenario, record_admin_user); + { + let mut trail = ts::take_shared>(&scenario); + let record_cap = ts::take_from_sender(&scenario); + + trail.access_mut().destroy_capability(record_cap); + + ts::return_shared(trail); + }; + + // Admin revokes RoleAdmin capability + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + let role_cap = ts::take_from_address(&scenario, role_admin_user); + + // Initially the deny list should be empty + assert!(trail.access().revoked_capabilities().length() == 0, 0); + + trail + .access_mut() + .revoke_capability( + &admin_cap, + role_cap.id(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify role_cap is in the deny list now + assert!(trail.access().revoked_capabilities().length() == 1, 1); + assert!(trail.access().revoked_capabilities().contains(role_cap_id), 2); + + ts::return_to_address(role_admin_user, role_cap); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test, 
expected_failure(abort_code = audit_trail::role_map::ECapabilityIssuedToMismatch)] +fun test_capability_issued_to_only() { + let admin_user = @0xAD; + let authorized_user = @0xB0B; + let unauthorized_user = @0xCAB; + + let mut scenario = ts::begin(admin_user); + + let _trail_id = setup_trail_with_record_admin_role(&mut scenario, admin_user); + + // Issue capability restricted to authorized_user only + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let cap = test_utils::new_capability_for_address( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + authorized_user, + std::option::none(), // no time restriction + &clock, + ts::ctx(&mut scenario), + ); + + // Verify capability properties + assert!(cap.issued_to() == std::option::some(authorized_user), 0); + assert!(cap.valid_from().is_none(), 1); + assert!(cap.valid_until().is_none(), 2); + + transfer::public_transfer(cap, authorized_user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Authorized user can use the capability + ts::next_tx(&mut scenario, authorized_user); + { + let (record_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let test_data = record::new_text(string::utf8(b"Authorized record")); + trail.add_record( + &record_cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Transfer the capability to he unauthorized_user to prepare the next test + transfer::public_transfer(record_cap, unauthorized_user); + + // Cleanup + iota::clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + // Unauthorized user cannot use the capability + ts::next_tx(&mut scenario, unauthorized_user); + { + let (record_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // This should fail as unauthorized_user has the wrong address + let test_data = 
record::new_text(string::utf8(b"Unauthorized record")); + trail.add_record( + &record_cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +// ===== Error Case Tests ===== + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityHasBeenRevoked)] +fun test_revoked_capability_cannot_be_used() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + // Create role and issue capability to user + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let user_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(user_cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Revoke the capability + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + let user_cap = ts::take_from_address(&scenario, user); + + trail + .access_mut() + .revoke_capability( + &admin_cap, + user_cap.id(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + ts::return_to_address(user, user_cap); + cleanup_capability_trail_and_clock(&scenario, admin_cap, 
trail, clock); + }; + + // Try to use revoked capability - should fail + ts::next_tx(&mut scenario, user); + { + let (user_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + + clock.set_for_testing(test_utils::initial_time_for_testing() + 1000); + + trail.add_record( + &user_cap, + record::new_text(string::utf8(b"Should fail")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, user_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ERoleDoesNotExist)] +fun test_new_capability_for_nonexistent_role() { + let admin_user = @0xAD; + + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let bad_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"NonExistentRole"), + &clock, + ts::ctx(&mut scenario), + ); + + bad_cap.destroy_for_testing(); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_revoke_capability_permission_denied() { + let admin_user = @0xAD; + let user1 = @0xB0B; + let user2 = @0xCAB; + + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + 
); + transfer::public_transfer(admin_cap, admin_user); + }; + + // Create two roles: one without revoke permission, one with record permissions + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"NoRevokePerm"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let user1_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"NoRevokePerm"), + &clock, + ts::ctx(&mut scenario), + ); + + let user2_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(user1_cap, user1); + transfer::public_transfer(user2_cap, user2); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // User1 (without revoke permission) tries to revoke User2's capability + ts::next_tx(&mut scenario, user1); + { + let user1_cap = ts::take_from_sender(&scenario); + let mut trail = ts::take_shared>(&scenario); + let user2_cap = ts::take_from_address(&scenario, user2); + let clock = iota::clock::create_for_testing(ts::ctx(&mut scenario)); + + trail + .access_mut() + .revoke_capability( + &user1_cap, + user2_cap.id(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + ts::return_to_address(user2, user2_cap); + ts::return_to_sender(&scenario, user1_cap); + ts::return_shared(trail); + iota::clock::destroy_for_testing(clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = 
audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_new_capability_permission_denied() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + // Create role without add_capabilities permission + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"NoCapPerm"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let user_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"NoCapPerm"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(user_cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // User tries to issue a new capability without permission + ts::next_tx(&mut scenario, user); + { + let (user_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let new_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &user_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + new_cap.destroy_for_testing(); + cleanup_capability_trail_and_clock(&scenario, user_cap, trail, clock); + }; + + ts::end(scenario); +} + +/// Test capability with only valid_from restriction (time-restricted from a point). 
+/// +/// This test validates: +/// - Capability can be used after valid_from timestamp +/// - Capability is not restricted by address or end time +/// - Capability cannot be used before valid_from timestamp +#[test, expected_failure(abort_code = audit_trail::role_map::ECapabilityTimeConstraintsNotMet)] +fun test_capability_valid_from_only() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + let valid_from_time = test_utils::initial_time_for_testing() + 5000; + + // Setup + let _trail_id = setup_trail_with_record_admin_role(&mut scenario, admin_user); + + // Issue capability with valid_from restriction only + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let cap = trail + .access_mut() + .new_capability( + &admin_cap, + &string::utf8(b"RecordAdmin"), + std::option::none(), // no address restriction + std::option::some(valid_from_time), + std::option::none(), // no valid_until + &clock, + ts::ctx(&mut scenario), + ); + + // Verify capability properties + assert!(cap.issued_to().is_none(), 0); + assert!(cap.valid_from() == std::option::some(valid_from_time), 1); + assert!(cap.valid_until().is_none(), 2); + + transfer::public_transfer(cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Use the capability after valid_from + ts::next_tx(&mut scenario, user); + { + let (cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(test_utils::initial_time_for_testing() + 6000); + + let test_data = record::new_text(string::utf8(b"Test record after valid_from")); + trail.add_record( + &cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, cap, trail, clock); + }; + + // Try to use the capability before valid_from + ts::next_tx(&mut scenario, user); + { + 
let (cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(test_utils::initial_time_for_testing() + 1000); + + // This should fail as the capability is not valid yet + let test_data = record::new_text(string::utf8(b"Test record before valid_from")); + trail.add_record( + &cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, cap, trail, clock); + }; + + ts::end(scenario); +} + +/// Test capability with only valid_until restriction (time-restricted until a point). +/// +/// This test validates: +/// - Capability can be used before valid_until timestamp +/// - Capability is not restricted by address or start time +/// - Capability cannot be used after valid_until timestamp +#[test, expected_failure(abort_code = audit_trail::role_map::ECapabilityTimeConstraintsNotMet)] +fun test_capability_valid_until_only() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + let valid_until_time_ms = test_utils::initial_time_for_testing() + 10000; + + // Setup + let _trail_id = setup_trail_with_record_admin_role(&mut scenario, admin_user); + + // Issue capability with valid_until restriction + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let cap = test_utils::new_capability_valid_until( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + valid_until_time_ms, + &clock, + ts::ctx(&mut scenario), + ); + + // Verify capability properties + assert!(cap.issued_to().is_none(), 0); + assert!(cap.valid_from().is_none(), 1); + assert!(cap.valid_until() == std::option::some(valid_until_time_ms), 2); + + transfer::public_transfer(cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Use the capability before valid_until + ts::next_tx(&mut scenario, 
user); + { + let (cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(valid_until_time_ms - 1000000); + + let test_data = record::new_text(string::utf8(b"Test record before valid_until")); + trail.add_record( + &cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, cap, trail, clock); + }; + + // Try to use the capability after valid_until + ts::next_tx(&mut scenario, user); + { + let (cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(valid_until_time_ms + 100000); + + // This should fail as the capability has expired + let test_data = record::new_text(string::utf8(b"Test record after valid_until")); + trail.add_record( + &cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, cap, trail, clock); + }; + + ts::end(scenario); +} + +/// Test capability with valid_from and valid_until restrictions (time window). 
+/// +/// This test validates: +/// - Capability can be used between valid_from and valid_until +/// - Capability is not restricted by address +#[test] +fun test_capability_time_window() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + let valid_from_time = test_utils::initial_time_for_testing() + 5000; + let valid_until_time = test_utils::initial_time_for_testing() + 10000; + + // Setup + let _trail_id = setup_trail_with_record_admin_capability_and_time_window_restriction( + &mut scenario, + admin_user, + user, + valid_from_time, + valid_until_time, + ); + + // Use the capability within the valid time window + ts::next_tx(&mut scenario, user); + { + let (cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(valid_from_time + 2500); + + let test_data = record::new_text(string::utf8(b"Test record within time window")); + trail.add_record( + &cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, cap, trail, clock); + }; + + ts::end(scenario); +} + +/// Test capability with valid_from and valid_until restrictions (time window). 
+/// +/// This test validates: +/// - Capability cannot be used before valid_from +#[test, expected_failure(abort_code = audit_trail::role_map::ECapabilityTimeConstraintsNotMet)] +fun test_capability_time_window_before_valid_from() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + let valid_from_time_ms = test_utils::initial_time_for_testing() + 5000; + let valid_until_time_ms = test_utils::initial_time_for_testing() + 10000; + + // Setup + let _trail_id = setup_trail_with_record_admin_capability_and_time_window_restriction( + &mut scenario, + admin_user, + user, + valid_from_time_ms, + valid_until_time_ms, + ); + + // Use the capability before valid_from + ts::next_tx(&mut scenario, user); + { + let (cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(valid_from_time_ms - 1000); + + let test_data = record::new_text(string::utf8(b"Test record before valid_from")); + trail.add_record( + &cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, cap, trail, clock); + }; + + ts::end(scenario); +} + +/// Test capability with valid_from and valid_until restrictions (time window). 
+/// +/// This test validates: +/// - Capability cannot be used after valid_until +#[test, expected_failure(abort_code = audit_trail::role_map::ECapabilityTimeConstraintsNotMet)] +fun test_capability_time_window_after_valid_until() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + let valid_from_time_ms = test_utils::initial_time_for_testing() + 5000; + let valid_until_time_ms = test_utils::initial_time_for_testing() + 10000; + + // Setup + let _trail_id = setup_trail_with_record_admin_capability_and_time_window_restriction( + &mut scenario, + admin_user, + user, + valid_from_time_ms, + valid_until_time_ms, + ); + + // Use the capability after valid_until + ts::next_tx(&mut scenario, user); + { + let (cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(valid_until_time_ms + 1000); + + let test_data = record::new_text(string::utf8(b"Test record after valid_until")); + trail.add_record( + &cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, cap, trail, clock); + }; + + ts::end(scenario); +} + +/// Test Capability::is_valid_for_timestamp function. 
+/// +/// This test validates: +/// - Returns true when timestamp is within valid range +/// - Returns false when timestamp is before valid_from +/// - Returns false when timestamp is after valid_until +/// - Returns true when no time restrictions exist +#[test] +fun test_is_valid_for_timestamp() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + let base_time = test_utils::initial_time_for_testing(); + let valid_from_time = base_time + 5000; + let valid_until_time = base_time + 10000; + + // Setup + let _trail_id = setup_trail_with_record_admin_role(&mut scenario, admin_user); + + // Test with time-restricted capability + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let cap = trail + .access_mut() + .new_capability( + &admin_cap, + &string::utf8(b"RecordAdmin"), + std::option::none(), + std::option::some(valid_from_time), + std::option::some(valid_until_time), + &clock, + ts::ctx(&mut scenario), + ); + + // Before valid_from + assert!(!cap.is_valid_for_timestamp(valid_from_time - 1), 0); + + // At valid_from (inclusive) + assert!(cap.is_valid_for_timestamp(valid_from_time), 1); + + // During validity period + assert!(cap.is_valid_for_timestamp(valid_from_time + 2500), 2); + + // Before valid_until (exclusive) + assert!(cap.is_valid_for_timestamp(valid_until_time - 1), 3); + + // At valid_until (inclusive) + assert!(cap.is_valid_for_timestamp(valid_until_time), 4); + + // After valid_until + assert!(!cap.is_valid_for_timestamp(valid_until_time + 1), 5); + + transfer::public_transfer(cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Test with unrestricted capability + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let unrestricted_cap = test_utils::new_capability_without_restrictions( + 
trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + // Should be valid at any timestamp + assert!(unrestricted_cap.is_valid_for_timestamp(0), 6); + assert!(unrestricted_cap.is_valid_for_timestamp(base_time), 7); + assert!(unrestricted_cap.is_valid_for_timestamp(valid_until_time + 99999), 8); + + transfer::public_transfer(unrestricted_cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +/// Test Capability::is_currently_valid function. +/// +/// This test validates: +/// - Returns true when current time is within valid range +/// - Returns false when current time is outside valid range +/// - Works correctly with Clock object +#[test] +fun test_is_currently_valid() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + let base_time = test_utils::initial_time_for_testing(); + let valid_from_time = base_time + 5000; + let valid_until_time = base_time + 10000; + + // Setup + let _trail_id = setup_trail_with_record_admin_role(&mut scenario, admin_user); + + // Issue time-restricted capability + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let cap = trail + .access_mut() + .new_capability( + &admin_cap, + &string::utf8(b"RecordAdmin"), + std::option::none(), + std::option::some(valid_from_time), + std::option::some(valid_until_time), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Test before valid_from + ts::next_tx(&mut scenario, user); + { + let cap = ts::take_from_sender(&scenario); + let mut clock = iota::clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(valid_from_time - 1000); + + assert!(!cap.is_currently_valid(&clock), 0); + + 
iota::clock::destroy_for_testing(clock); + ts::return_to_sender(&scenario, cap); + }; + + // Test during valid period + ts::next_tx(&mut scenario, user); + { + let cap = ts::take_from_sender(&scenario); + let mut clock = iota::clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(valid_from_time + 2500); + + assert!(cap.is_currently_valid(&clock), 1); + + iota::clock::destroy_for_testing(clock); + ts::return_to_sender(&scenario, cap); + }; + + // Test after valid_until + ts::next_tx(&mut scenario, user); + { + let cap = ts::take_from_sender(&scenario); + let mut clock = iota::clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(valid_until_time + 1000); + + assert!(!cap.is_currently_valid(&clock), 2); + + iota::clock::destroy_for_testing(clock); + ts::return_to_sender(&scenario, cap); + }; + + ts::end(scenario); +} diff --git a/audit-trail-move/tests/create_audit_trail_tests.move b/audit-trail-move/tests/create_audit_trail_tests.move new file mode 100644 index 00000000..8f6f6c52 --- /dev/null +++ b/audit-trail-move/tests/create_audit_trail_tests.move @@ -0,0 +1,494 @@ +#[allow(lint(abort_without_constant))] +#[test_only] +module audit_trail::create_audit_trail_tests; + +use audit_trail::{ + locking, + main::{Self, AuditTrail, initial_admin_role_name}, + permission, + record::{Self, Data}, + test_utils::{ + setup_test_audit_trail, + initial_time_for_testing, + fetch_capability_trail_and_clock, + cleanup_capability_trail_and_clock, + new_capability_for_address + } +}; +use iota::{clock, test_scenario as ts}; +use std::string; +use tf_components::timelock; + +#[test] +fun test_create_without_initial_record() { + let user = @0xA; + let mut scenario = ts::begin(user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + + let (admin_cap, trail_id) = setup_test_audit_trail( + &mut scenario, + locking_config, + option::none(), + ); + + // Verify capability was 
created + assert!(admin_cap.role() == initial_admin_role_name(), 0); + assert!(admin_cap.target_key() == trail_id, 1); + + // Clean up + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, user); + { + let trail = ts::take_shared>(&scenario); + + // Verify trail was created correctly + assert!(trail.creator() == user, 2); + assert!(trail.created_at() == initial_time_for_testing(), 3); + assert!(trail.record_count() == 0, 4); + + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_tag_admin_role_can_manage_available_record_tags() { + let admin = @0xA; + let tag_admin = @0xB; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + option::none(), + ); + + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail.create_role( + &admin_cap, + string::utf8(b"TagAdmin"), + permission::tag_admin_permissions(), + option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let tag_admin_cap = new_capability_for_address( + trail.access_mut(), + &admin_cap, + &string::utf8(b"TagAdmin"), + tag_admin, + option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(tag_admin_cap, tag_admin); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::next_tx(&mut scenario, tag_admin); + { + let (tag_admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail.add_record_tag( + &tag_admin_cap, + string::utf8(b"finance"), + &clock, + ts::ctx(&mut scenario), + ); + + let available_tags = trail.tags().tag_keys(); + assert!(available_tags.length() == 1, 0); + assert!(available_tags.contains(&string::utf8(b"finance")), 1); + + 
trail.remove_record_tag( + &tag_admin_cap, + string::utf8(b"finance"), + &clock, + ts::ctx(&mut scenario), + ); + + let available_tags = trail.tags().tag_keys(); + assert!(available_tags.length() == 0, 2); + + cleanup_capability_trail_and_clock(&scenario, tag_admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_create_with_initial_record() { + let user = @0xB; + let mut scenario = ts::begin(user); + + { + let locking_config = locking::new( + locking::window_time_based(86400), + timelock::none(), + timelock::none(), + ); // 1 day in seconds + let initial_data = record::new_text(string::utf8(b"Hello, World!")); + + let (admin_cap, trail_id) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(initial_data), + ); + + // Verify capability + assert!(admin_cap.role() == initial_admin_role_name(), 0); + assert!(admin_cap.target_key() == trail_id, 1); + + // Clean up + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, user); + { + let trail = ts::take_shared>(&scenario); + + // Verify trail with initial record + assert!(trail.creator() == user, 2); + assert!(trail.created_at() == initial_time_for_testing(), 3); + assert!(trail.record_count() == 1, 4); + + // Verify the initial record exists + assert!(trail.has_record(0), 5); + + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_create_with_tagged_initial_record_tracks_tag_usage() { + let user = @0xC; + let mut scenario = ts::begin(user); + + { + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing()); + + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let initial_record = record::new_initial_record( + record::new_text(string::utf8(b"Tagged initial record")), + option::none(), + option::some(string::utf8(b"finance")), + ); + + let (admin_cap, trail_id) = main::create( + option::some(initial_record), + 
locking_config, + option::none(), + option::none(), + vector[string::utf8(b"finance")], + &clock, + ts::ctx(&mut scenario), + ); + + assert!(admin_cap.role() == initial_admin_role_name(), 0); + assert!(admin_cap.target_key() == trail_id, 1); + admin_cap.destroy_for_testing(); + clock::destroy_for_testing(clock); + }; + + ts::next_tx(&mut scenario, user); + { + let trail = ts::take_shared>(&scenario); + let finance_tag = string::utf8(b"finance"); + + assert!(trail.record_count() == 1, 2); + assert!(trail.tags().usage_count(&finance_tag) == option::some(1), 3); + assert!(trail.tags().is_in_use(&finance_tag), 4); + + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test, expected_failure(abort_code = audit_trail::main::ERecordTagInUse)] +fun test_create_with_tagged_initial_record_blocks_tag_removal() { + let admin = @0xD; + let mut scenario = ts::begin(admin); + + { + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing()); + + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let initial_record = record::new_initial_record( + record::new_text(string::utf8(b"Tagged initial record")), + option::none(), + option::some(string::utf8(b"finance")), + ); + + let (admin_cap, _) = main::create( + option::some(initial_record), + locking_config, + option::none(), + option::none(), + vector[string::utf8(b"finance")], + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(admin_cap, admin); + clock::destroy_for_testing(clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + trail.remove_record_tag( + &admin_cap, + string::utf8(b"finance"), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_create_minimal_metadata() { + let user = 
@0xC; + let mut scenario = ts::begin(user); + + { + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(3000); + + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + + let (admin_cap, _trail_id) = main::create( + option::none(), + locking_config, + option::none(), + option::none(), + vector[], + &clock, + ts::ctx(&mut scenario), + ); + + // Verify capability was created + assert!(admin_cap.role() == initial_admin_role_name(), 0); + + // Clean up + admin_cap.destroy_for_testing(); + clock::destroy_for_testing(clock); + }; + + ts::next_tx(&mut scenario, user); + { + let trail = ts::take_shared>(&scenario); + + // Verify trail was created + assert!(trail.creator() == user, 1); + assert!(trail.created_at() == 3000, 2); + assert!(trail.record_count() == 0, 3); + + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_create_with_locking_enabled() { + let user = @0xD; + let mut scenario = ts::begin(user); + + { + let locking_config = locking::new( + locking::window_time_based(604800), + timelock::none(), + timelock::none(), + ); // 7 days in seconds + let (admin_cap, _trail_id) = setup_test_audit_trail( + &mut scenario, + locking_config, + option::none(), + ); + + // Clean up + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, user); + { + let trail = ts::take_shared>(&scenario); + + // Verify trail with locking enabled + assert!(trail.creator() == user, 0); + assert!(trail.record_count() == 0, 1); + + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_create_multiple_trails() { + let user = @0xE; + let mut scenario = ts::begin(user); + + let mut trail_ids = vector::empty(); + + // Create first trail + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap1, trail_id1) = setup_test_audit_trail( + &mut scenario, + 
locking_config, + option::none(), + ); + + trail_ids.push_back(trail_id1); + admin_cap1.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, user); + + // Create second trail + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap2, trail_id2) = setup_test_audit_trail( + &mut scenario, + locking_config, + option::none(), + ); + + trail_ids.push_back(trail_id2); + + // Verify trails have different IDs + assert!(trail_ids[0] != trail_ids[1], 0); + + admin_cap2.destroy_for_testing(); + }; + + ts::end(scenario); +} + +#[test] +fun test_create_metadata_admin_role() { + let creator = @0xA; + let user = @0xB; + let mut scenario = ts::begin(creator); + + // Creator creates the audit trail + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + + let (admin_cap, trail_id) = setup_test_audit_trail( + &mut scenario, + locking_config, + option::none(), + ); + + // Verify admin capability was created + assert!(admin_cap.role() == initial_admin_role_name(), 0); + assert!(admin_cap.target_key() == trail_id, 1); + + // Transfer the admin capability to the user + transfer::public_transfer(admin_cap, user); + }; + + // User receives the capability and creates the MetadataAdmin role + ts::next_tx(&mut scenario, user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + // Create the MetadataAdmin role using the admin capability + let metadata_admin_role_name = string::utf8(b"MetadataAdmin"); + let metadata_admin_perms = audit_trail::permission::metadata_admin_permissions(); + + trail + .access_mut() + .create_role( + &admin_cap, + metadata_admin_role_name, + metadata_admin_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify the role was created by fetching its permissions + let role_perms = trail.access().get_role_permissions(&string::utf8(b"MetadataAdmin")); + 
+ // Verify the role has the correct permissions + assert!( + audit_trail::permission::has_permission( + role_perms, + &audit_trail::permission::update_metadata(), + ), + 2, + ); + assert!( + audit_trail::permission::has_permission( + role_perms, + &audit_trail::permission::delete_metadata(), + ), + 3, + ); + assert!(iota::vec_set::size(role_perms) == 2, 4); + + // Clean up + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} diff --git a/audit-trail-move/tests/locking_tests.move b/audit-trail-move/tests/locking_tests.move new file mode 100644 index 00000000..5ccb4e14 --- /dev/null +++ b/audit-trail-move/tests/locking_tests.move @@ -0,0 +1,1135 @@ +#[allow(lint(abort_without_constant))] +#[test_only] +module audit_trail::locking_tests; + +use audit_trail::{ + locking, + main::{Self, AuditTrail}, + permission, + record::{Self, Data}, + test_utils::{ + Self, + setup_test_audit_trail, + initial_time_for_testing, + fetch_capability_trail_and_clock, + cleanup_capability_trail_and_clock, + cleanup_trail_and_clock + } +}; +use iota::{clock, test_scenario as ts}; +use std::string; +use tf_components::{capability::Capability, timelock}; + +// ===== Time-Based Locking Tests ===== + +#[test] +fun test_time_based_locking_within_window() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Create trail with 1 hour time-based locking + { + let locking_config = locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Test"))), + ); + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + + // 1 second after creation - locked + clock.set_for_testing(initial_time_for_testing() + 1000); + 
assert!(trail.is_record_locked(0, &clock), 0); + + // 30 minutes after - locked + clock.set_for_testing(initial_time_for_testing() + 1800 * 1000); + assert!(trail.is_record_locked(0, &clock), 1); + + // 59 minutes after - locked + clock.set_for_testing(initial_time_for_testing() + 3540 * 1000); + assert!(trail.is_record_locked(0, &clock), 2); + + clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_time_based_locking_outside_window() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Create trail with 1 hour time-based locking + { + let locking_config = locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Test"))), + ); + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + + // 1 hour + 1 second after creation - unlocked + clock.set_for_testing(initial_time_for_testing() + 3601 * 1000); + assert!(!trail.is_record_locked(0, &clock), 0); + + // 2 hours after - unlocked + clock.set_for_testing(initial_time_for_testing() + 7200 * 1000); + assert!(!trail.is_record_locked(0, &clock), 1); + + clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +// ===== Count-Based Locking Tests ===== + +#[test] +fun test_count_based_locking() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Create trail with count-based locking (last 2 locked) + { + let locking_config = locking::new( + locking::window_count_based(2), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create 
RecordAdmin role and capability + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Add 5 records and verify locking + ts::next_tx(&mut scenario, admin); + { + let record_cap = ts::take_from_sender(&scenario); + let mut trail = ts::take_shared>(&scenario); + + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing() + 1000); + + let mut i = 0u64; + while (i < 5) { + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Record")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + i = i + 1; + }; + + // With 5 records and last 2 locked: + // Records 0, 1, 2 = unlocked (have 4, 3, 2 records after them) + // Records 3, 4 = locked (have 1, 0 records after them) + assert!(!trail.is_record_locked(0, &clock), 0); + assert!(!trail.is_record_locked(1, &clock), 1); + assert!(!trail.is_record_locked(2, &clock), 2); + assert!(trail.is_record_locked(3, &clock), 3); + assert!(trail.is_record_locked(4, &clock), 4); + + clock::destroy_for_testing(clock); + record_cap.destroy_for_testing(); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_count_based_locking_single_record() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Create trail with "last 3 locked" - single record should be locked + { + let locking_config = locking::new( + 
locking::window_count_based(3), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Single"))), + ); + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing() + 1000); + + assert!(trail.is_record_locked(0, &clock), 0); + + clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +// ===== No Locking Tests ===== + +#[test] +fun test_no_locking() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Test"))), + ); + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing()); + + // No locking config = never locked + assert!(!trail.is_record_locked(0, &clock), 0); + + clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +// ===== Update Locking Config Tests ===== + +#[test] +fun test_update_locking_config() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Create trail with no locking + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Test"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create LockingAdmin role + 
ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let perms = permission::from_vec(vector[permission::update_locking_config()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"LockingAdmin"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let locking_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"LockingAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(locking_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Update from no-locking to time-based + ts::next_tx(&mut scenario, admin); + { + let (locking_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + // Initially unlocked + assert!(!trail.is_record_locked(0, &clock), 0); + + // Update to 1 hour time-based locking + trail.update_locking_config( + &locking_cap, + locking::new(locking::window_time_based(3600), timelock::none(), timelock::none()), + &clock, + ts::ctx(&mut scenario), + ); + + // Now locked + assert!(trail.is_record_locked(0, &clock), 1); + + // locking_cap.destroy_for_testing(); + cleanup_capability_trail_and_clock(&scenario, locking_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_update_locking_config_permission_denied() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create role WITHOUT UpdateLockingConfig permission + ts::next_tx(&mut 
scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"NoLockingPerm"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let no_locking_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"NoLockingPerm"), + &clock, + ts::ctx(&mut scenario), + ); + transfer::public_transfer(no_locking_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Try to update locking config - should fail + ts::next_tx(&mut scenario, admin); + { + let (no_locking_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail.update_locking_config( + &no_locking_cap, + locking::new(locking::window_time_based(3600), timelock::none(), timelock::none()), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, no_locking_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_update_delete_record_window() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Create trail with no locking + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Test"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create role with UpdateLockingConfigForDeleteRecord permission + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let perms = permission::from_vec(vector[ + permission::update_locking_config_for_delete_record(), + ]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"DeleteLockAdmin"), + 
perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let delete_lock_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"DeleteLockAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(delete_lock_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Update delete_record_lock + ts::next_tx(&mut scenario, admin); + { + let (delete_lock_cap, mut trail, mut clock) = fetch_capability_trail_and_clock( + &mut scenario, + ); + clock.set_for_testing(initial_time_for_testing() + 1000); + + // Initially unlocked + assert!(!trail.is_record_locked(0, &clock), 0); + + // Update to count-based (last 5 locked) + trail.update_delete_record_window( + &delete_lock_cap, + locking::window_count_based(5), + &clock, + ts::ctx(&mut scenario), + ); + + // Now locked (single record, last 5 are locked) + assert!(trail.is_record_locked(0, &clock), 1); + + cleanup_capability_trail_and_clock(&scenario, delete_lock_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_update_delete_record_window_permission_denied() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create role with update_locking_config but NOT update_locking_config_for_delete_record + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let perms = permission::from_vec(vector[permission::update_locking_config()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"WrongPerm"), + perms, + 
std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let wrong_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"WrongPerm"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(wrong_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Try to update delete_record_lock - should fail + ts::next_tx(&mut scenario, admin); + { + let (wrong_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail.update_delete_record_window( + &wrong_cap, + locking::window_count_based(5), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, wrong_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_time_lock_boundary_just_before_expiry() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Create trail with 1 hour time-based locking + { + let locking_config = locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Test"))), + ); + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + + // 1 millisecond before lock expires - should still be locked + // 3600 * 1000 - 1 = 3599999 ms + clock.set_for_testing(initial_time_for_testing() + 3600 * 1000 - 1); + assert!(trail.is_record_locked(0, &clock), 0); + + clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +// ===== Variant Locking Tests ===== + +#[test] +fun test_time_based_locking_all_recent_records_locked() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Create trail with time-based (1 hour) locking + { + let locking_config = 
locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create RecordAdmin role and add records + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + // Add 5 records + clock.set_for_testing(initial_time_for_testing() + 1000); + + let mut i = 0u64; + while (i < 5) { + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Record")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + i = i + 1; + }; + + transfer::public_transfer(record_cap, admin); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Test: Records locked by time-based window + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + + // Shortly after creation - all records are time-locked + clock.set_for_testing(initial_time_for_testing() + 2000); + + // All records should be locked (time lock active for all) + assert!(trail.is_record_locked(0, &clock), 0); + assert!(trail.is_record_locked(1, &clock), 1); + assert!(trail.is_record_locked(2, &clock), 2); + assert!(trail.is_record_locked(3, &clock), 3); + assert!(trail.is_record_locked(4, &clock), 4); + + clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun 
test_count_based_locking_last_records_remain_locked() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Create trail with count-based (last 2) locking + { + let locking_config = locking::new( + locking::window_count_based(2), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create RecordAdmin role and add records + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + // Add 5 records + clock.set_for_testing(initial_time_for_testing() + 1000); + + let mut i = 0u64; + while (i < 5) { + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Record")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + i = i + 1; + }; + + transfer::public_transfer(record_cap, admin); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Test: Count lock active for last 2 records + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared<AuditTrail<Data>>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + + // 2 hours later, count lock behavior should be unchanged + clock.set_for_testing(initial_time_for_testing() + 7200 * 1000); + + // Records 0, 1, 2 should be unlocked (not in last 2) + assert!(!trail.is_record_locked(0, &clock), 0); + assert!(!trail.is_record_locked(1, &clock), 1); + assert!(!trail.is_record_locked(2, &clock), 2); + + // Records 3, 4 
should still be locked (count lock - last 2) + assert!(trail.is_record_locked(3, &clock), 3); + assert!(trail.is_record_locked(4, &clock), 4); + + clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_count_based_locking_old_record_can_delete() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_count_based(2), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + clock.set_for_testing(initial_time_for_testing() + 1000); + + let mut i = 0u64; + while (i < 5) { + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Record")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + i = i + 1; + }; + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let mut trail = ts::take_shared<AuditTrail<Data>>(&scenario); + let record_cap = ts::take_from_sender(&scenario); + + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing() + 7200 * 1000); + + assert!(!trail.is_record_locked(0, &clock), 0); + assert!(trail.has_record(0), 1); + + trail.delete_record(&record_cap, 0, &clock, 
ts::ctx(&mut scenario)); + + assert!(!trail.has_record(0), 2); + + clock::destroy_for_testing(clock); + record_cap.destroy_for_testing(); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_time_based_locking_still_locked_before_expiry() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Create trail with time-based (1 hour) locking + { + let locking_config = locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create RecordAdmin role and add records + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + // Add 5 records + clock.set_for_testing(initial_time_for_testing() + 1000); + + let mut i = 0u64; + while (i < 5) { + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Record")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + i = i + 1; + }; + + transfer::public_transfer(record_cap, admin); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Test: Time lock still active before expiry + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared<AuditTrail<Data>>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + + // Only 30 minutes after creation - time lock still active + clock.set_for_testing(initial_time_for_testing() + 1800 * 1000); + + // 
Records are still locked because time window has not expired yet + assert!(trail.is_record_locked(0, &clock), 0); + assert!(trail.is_record_locked(1, &clock), 1); + assert!(trail.is_record_locked(2, &clock), 2); + + clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_delete_records_batch_bypasses_record_lock() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Locked"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + let delete_all_role = string::utf8(b"DeleteAllRecordsAdmin"); + let delete_all_perms = permission::from_vec(vector[permission::delete_all_records()]); + + trail + .access_mut() + .create_role( + &admin_cap, + delete_all_role, + delete_all_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let delete_all_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"DeleteAllRecordsAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + clock.set_for_testing(initial_time_for_testing() + 1000); + let deleted = trail.delete_records_batch( + &delete_all_cap, + 10, + &clock, + ts::ctx(&mut scenario), + ); + assert!(deleted == 1, 0); + assert!(trail.record_count() == 0, 1); + + delete_all_cap.destroy_for_testing(); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::main::ETrailNotEmpty)] +fun test_delete_audit_trail_fails_while_not_empty() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let 
locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Record"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let admin_cap = ts::take_from_sender(&scenario); + let mut trail = ts::take_shared<AuditTrail<Data>>(&scenario); + let clock = clock::create_for_testing(ts::ctx(&mut scenario)); + + let delete_trail_role = string::utf8(b"DeleteTrailOnly"); + let delete_trail_perms = permission::from_vec(vector[permission::delete_audit_trail()]); + trail + .access_mut() + .create_role( + &admin_cap, + delete_trail_role, + delete_trail_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let delete_trail_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"DeleteTrailOnly"), + &clock, + ts::ctx(&mut scenario), + ); + + main::delete_audit_trail(trail, &delete_trail_cap, &clock, ts::ctx(&mut scenario)); + + clock::destroy_for_testing(clock); + delete_trail_cap.destroy_for_testing(); + admin_cap.destroy_for_testing(); + }; + + ts::end(scenario); +} + +#[test] +fun test_delete_audit_trail_after_batch_cleanup() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Record"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let admin_cap = ts::take_from_sender(&scenario); + let mut trail = ts::take_shared<AuditTrail<Data>>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + let delete_maintenance_role = string::utf8(b"DeleteMaintenance"); + let delete_maintenance_perms 
= permission::from_vec(vector[ + permission::delete_all_records(), + permission::delete_audit_trail(), + ]); + + trail + .access_mut() + .create_role( + &admin_cap, + delete_maintenance_role, + delete_maintenance_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let delete_maintenance_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"DeleteMaintenance"), + &clock, + ts::ctx(&mut scenario), + ); + + clock.set_for_testing(initial_time_for_testing() + 1000); + let deleted = trail.delete_records_batch( + &delete_maintenance_cap, + 100, + &clock, + ts::ctx(&mut scenario), + ); + assert!(deleted == 1, 0); + assert!(trail.record_count() == 0, 1); + + main::delete_audit_trail(trail, &delete_maintenance_cap, &clock, ts::ctx(&mut scenario)); + + clock::destroy_for_testing(clock); + delete_maintenance_cap.destroy_for_testing(); + admin_cap.destroy_for_testing(); + }; + + ts::end(scenario); +} diff --git a/audit-trail-move/tests/metadata_tests.move b/audit-trail-move/tests/metadata_tests.move new file mode 100644 index 00000000..62c7a4ea --- /dev/null +++ b/audit-trail-move/tests/metadata_tests.move @@ -0,0 +1,306 @@ +#[allow(lint(abort_without_constant))] +#[test_only] +module audit_trail::metadata_tests; + +use audit_trail::{ + locking, + permission, + test_utils::{ + Self, + setup_test_audit_trail, + fetch_capability_trail_and_clock, + cleanup_capability_trail_and_clock + } +}; +use iota::test_scenario as ts; +use std::{option::none, string}; +use tf_components::{capability::Capability, timelock}; + +// ===== Success Case Tests ===== + +#[test] +fun test_update_metadata_success() { + let admin_user = @0xAD; + let metadata_admin_user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + // Setup: Create audit trail + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut 
scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + // Create MetadataAdmin role and capability + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Create MetadataAdmin role with metadata permissions + let metadata_perms = permission::metadata_admin_permissions(); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"MetadataAdmin"), + metadata_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Issue capability to metadata admin user + let metadata_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"MetadataAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(metadata_cap, metadata_admin_user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Test: MetadataAdmin updates metadata + ts::next_tx(&mut scenario, metadata_admin_user); + { + let (metadata_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Update metadata + let new_metadata = std::option::some(string::utf8(b"Updated metadata value")); + trail.update_metadata( + &metadata_cap, + new_metadata, + &clock, + ts::ctx(&mut scenario), + ); + + // Verify metadata was updated + let current_metadata = trail.metadata(); + assert!(current_metadata.is_some(), 0); + assert!(*current_metadata.borrow() == string::utf8(b"Updated metadata value"), 1); + + cleanup_capability_trail_and_clock(&scenario, metadata_cap, trail, clock); + }; + + // Test: Update metadata again to verify multiple updates work + ts::next_tx(&mut scenario, metadata_admin_user); + { + let (metadata_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Update to different value + let new_metadata = std::option::some(string::utf8(b"Second update")); + trail.update_metadata( + 
&metadata_cap, + new_metadata, + &clock, + ts::ctx(&mut scenario), + ); + + // Verify metadata was updated + let current_metadata = trail.metadata(); + assert!(current_metadata.is_some(), 2); + assert!(*current_metadata.borrow() == string::utf8(b"Second update"), 3); + + cleanup_capability_trail_and_clock(&scenario, metadata_cap, trail, clock); + }; + + // Test: Set metadata to none + ts::next_tx(&mut scenario, metadata_admin_user); + { + let (metadata_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Set to none + trail.update_metadata( + &metadata_cap, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify metadata is now none + let current_metadata = trail.metadata(); + assert!(current_metadata.is_none(), 4); + + cleanup_capability_trail_and_clock(&scenario, metadata_cap, trail, clock); + }; + + ts::end(scenario); +} + +// ===== Error Case Tests ===== + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_update_metadata_permission_denied() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + // Setup + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + // Create role WITHOUT update_metadata permission + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Create role with only add_record permission (no update_metadata) + let perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"NoMetadataPerm"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let user_cap = 
test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"NoMetadataPerm"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(user_cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // User tries to update metadata - should fail + ts::next_tx(&mut scenario, user); + { + let (user_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // This should fail - no update_metadata permission + trail.update_metadata( + &user_cap, + std::option::some(string::utf8(b"Should fail")), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, user_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityHasBeenRevoked)] +fun test_update_metadata_revoked_capability() { + let admin_user = @0xAD; + let metadata_admin_user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + // Setup: Create audit trail + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + // Create MetadataAdmin role and capability + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Create MetadataAdmin role + let metadata_perms = permission::metadata_admin_permissions(); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"MetadataAdmin"), + metadata_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Issue capability + let metadata_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"MetadataAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + 
transfer::public_transfer(metadata_cap, metadata_admin_user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Revoke the capability + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + let metadata_cap = ts::take_from_address(&scenario, metadata_admin_user); + + trail + .access_mut() + .revoke_capability( + &admin_cap, + metadata_cap.id(), + none(), + &clock, + ts::ctx(&mut scenario), + ); + + ts::return_to_address(metadata_admin_user, metadata_cap); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // Try to use revoked capability - should fail + ts::next_tx(&mut scenario, metadata_admin_user); + { + let (metadata_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // This should fail - capability has been revoked + trail.update_metadata( + &metadata_cap, + std::option::some(string::utf8(b"Should fail")), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, metadata_cap, trail, clock); + }; + + ts::end(scenario); +} diff --git a/audit-trail-move/tests/permission_tests.move b/audit-trail-move/tests/permission_tests.move new file mode 100644 index 00000000..67ae90c6 --- /dev/null +++ b/audit-trail-move/tests/permission_tests.move @@ -0,0 +1,131 @@ +#[allow(lint(abort_without_constant))] +#[test_only] +module audit_trail::permission_tests; + +use audit_trail::permission; +use iota::vec_set; + +#[test] +fun test_has_permission_empty_set() { + let set = permission::empty(); + assert!(vec_set::size(&set) == 0, 0); +} + +#[test] +fun test_has_permission_single_permission() { + let mut set = permission::empty(); + let perm = permission::add_record(); + permission::add(&mut set, perm); + + assert!(permission::has_permission(&set, &perm), 0); +} + +#[test] +fun test_has_permission_not_in_set() { + let mut set = permission::empty(); + permission::add(&mut set, 
permission::add_record()); + + let perm = permission::delete_record(); + assert!(!permission::has_permission(&set, &perm), 0); +} + +#[test] +fun test_has_permission_multiple_permission() { + let mut set = permission::empty(); + permission::add(&mut set, permission::add_record()); + permission::add(&mut set, permission::delete_record()); + permission::add(&mut set, permission::delete_audit_trail()); + + assert!(permission::has_permission(&set, &permission::add_record()), 0); + assert!(permission::has_permission(&set, &permission::delete_record()), 0); + assert!(permission::has_permission(&set, &permission::delete_audit_trail()), 0); + assert!(!permission::has_permission(&set, &permission::correct_record()), 0); +} + +#[test] +fun test_has_permission_from_vec() { + let perms = vector[ + permission::add_record(), + permission::delete_record(), + permission::update_metadata(), + ]; + let set = permission::from_vec(perms); + + assert!(permission::has_permission(&set, &permission::add_record()), 0); + assert!(permission::has_permission(&set, &permission::delete_record()), 0); + assert!(permission::has_permission(&set, &permission::update_metadata()), 0); + assert!(!permission::has_permission(&set, &permission::delete_audit_trail()), 0); +} + +#[test] +fun test_from_vec_empty() { + let perms = vector[]; + let set = permission::from_vec(perms); + + assert!(vec_set::size(&set) == 0, 0); +} + +#[test] +fun test_from_vec_single_permission() { + let perms = vector[permission::add_record()]; + let set = permission::from_vec(perms); + + assert!(vec_set::size(&set) == 1, 0); + assert!(permission::has_permission(&set, &permission::add_record()), 0); +} + +#[test] +fun test_from_vec_multiple_permission() { + let perms = vector[ + permission::add_record(), + permission::delete_record(), + permission::delete_audit_trail(), + ]; + let set = permission::from_vec(perms); + + assert!(vec_set::size(&set) == 3, 0); + assert!(permission::has_permission(&set, &permission::add_record()), 0); 
+ assert!(permission::has_permission(&set, &permission::delete_record()), 0); + assert!(permission::has_permission(&set, &permission::delete_audit_trail()), 0); + assert!(!permission::has_permission(&set, &permission::correct_record()), 0); +} + +#[test] +fun test_metadata_admin_permissions() { + let perms = permission::metadata_admin_permissions(); + + assert!(permission::has_permission(&perms, &permission::update_metadata()), 0); + assert!(permission::has_permission(&perms, &permission::delete_metadata()), 0); + assert!(iota::vec_set::size(&perms) == 2, 0); +} + +#[test] +fun test_tag_admin_permissions() { + let perms = permission::tag_admin_permissions(); + + assert!(permission::has_permission(&perms, &permission::add_record_tags()), 0); + assert!(permission::has_permission(&perms, &permission::delete_record_tags()), 1); + assert!(iota::vec_set::size(&perms) == 2, 2); +} + +#[test] +fun test_admin_permissions_include_tag_management() { + let perms = permission::admin_permissions(); + + assert!(permission::has_permission(&perms, &permission::add_record_tags()), 0); + assert!(permission::has_permission(&perms, &permission::delete_record_tags()), 1); +} + +#[test] +#[expected_failure(abort_code = vec_set::EKeyAlreadyExists)] +fun test_from_vec_duplicate_permission() { + // VecSet should throw error EKeyAlreadyExists on duplicate insertions + let perms = vector[ + permission::add_record(), + permission::delete_record(), + permission::add_record(), // duplicate + ]; + let set = permission::from_vec(perms); + // The following line should not be reached due to the expected failure + assert!(vec_set::size(&set) == 2, 0); +} diff --git a/audit-trail-move/tests/record_tests.move b/audit-trail-move/tests/record_tests.move new file mode 100644 index 00000000..e3be34f2 --- /dev/null +++ b/audit-trail-move/tests/record_tests.move @@ -0,0 +1,1593 @@ +#[allow(lint(abort_without_constant))] +#[test_only] +module audit_trail::record_tests; + +use audit_trail::{ + locking, + 
main::{Self, AuditTrail}, + permission, + record::{Self, Data}, + record_tags, + test_utils::{ + Self, + setup_test_audit_trail, + setup_test_audit_trail_with_tags, + initial_time_for_testing, + fetch_capability_trail_and_clock, + cleanup_capability_trail_and_clock, + cleanup_trail_and_clock + } +}; +use iota::{clock, test_scenario as ts}; +use std::string; +use tf_components::{capability::Capability, timelock}; + +// ===== Add Record Tests ===== + +#[test] +fun test_add_record_to_empty_trail() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create RecordAdmin role + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Add record + ts::next_tx(&mut scenario, admin); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + // Verify initial state + assert!(trail.record_count() == 0, 0); + assert!(trail.is_empty(), 1); + + // Add record + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"First record")), + std::option::some(string::utf8(b"metadata")), + 
std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify record was added + assert!(trail.record_count() == 1, 2); + assert!(!trail.is_empty(), 3); + assert!(trail.has_record(0), 4); + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_add_tagged_record_with_matching_role_tags() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"finance")], + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail.create_role( + &admin_cap, + string::utf8(b"TaggedWriter"), + permission::record_admin_permissions(), + std::option::some(record_tags::new_role_tags(vector[string::utf8(b"finance")])), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"TaggedWriter"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Tagged record")), + std::option::none(), + std::option::some(string::utf8(b"finance")), + &clock, + ts::ctx(&mut scenario), + ); + + let stored_record = trail.get_record(0); + assert!(*record::tag(stored_record) == std::option::some(string::utf8(b"finance")), 0); + + 
cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_delete_tagged_record_with_matching_role_tags() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"finance")], + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail.create_role( + &admin_cap, + string::utf8(b"TaggedRecordAdmin"), + permission::record_admin_permissions(), + std::option::some(record_tags::new_role_tags(vector[string::utf8(b"finance")])), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"TaggedRecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Tagged record")), + std::option::none(), + std::option::some(string::utf8(b"finance")), + &clock, + ts::ctx(&mut scenario), + ); + + trail.delete_record(&record_cap, 0, &clock, ts::ctx(&mut scenario)); + + assert!(trail.record_count() == 0, 0); + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_delete_records_batch_with_matching_role_tags() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { 
+ let locking_config = locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"finance")], + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"TaggedDeleteAll"), + permission::from_vec(vector[ + permission::add_record(), + permission::delete_all_records(), + ]), + std::option::some(record_tags::new_role_tags(vector[string::utf8(b"finance")])), + &clock, + ts::ctx(&mut scenario), + ); + + let cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"TaggedDeleteAll"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let (cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + trail.add_record( + &cap, + record::new_text(string::utf8(b"Tagged record")), + std::option::none(), + std::option::some(string::utf8(b"finance")), + &clock, + ts::ctx(&mut scenario), + ); + + let deleted = trail.delete_records_batch(&cap, 10, &clock, ts::ctx(&mut scenario)); + assert!(deleted == 1, 0); + assert!(trail.record_count() == 0, 1); + + cleanup_capability_trail_and_clock(&scenario, cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::main::ERecordTagNotAllowed)] +fun test_add_tagged_record_requires_matching_role_tags() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + 
timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"finance")], + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"PlainWriter"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"PlainWriter"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Denied tagged record")), + std::option::none(), + std::option::some(string::utf8(b"finance")), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::main::ERecordTagNotAllowed)] +fun test_delete_tagged_record_requires_matching_role_tags() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"finance")], + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, 
clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"TaggedRecordAdmin"), + permission::record_admin_permissions(), + std::option::some(record_tags::new_role_tags(vector[string::utf8(b"finance")])), + &clock, + ts::ctx(&mut scenario), + ); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"DeleteOnly"), + permission::from_vec(vector[permission::delete_record()]), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let tagged_record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"TaggedRecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + let delete_only_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"DeleteOnly"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(tagged_record_cap, admin); + transfer::public_transfer(delete_only_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let delete_only_cap = ts::take_from_sender(&scenario); + let tagged_record_cap = ts::take_from_sender(&scenario); + let mut trail = ts::take_shared<AuditTrail<Data>>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing() + 1000); + + trail.add_record( + &tagged_record_cap, + record::new_text(string::utf8(b"Tagged record")), + std::option::none(), + std::option::some(string::utf8(b"finance")), + &clock, + ts::ctx(&mut scenario), + ); + + tagged_record_cap.destroy_for_testing(); + cleanup_capability_trail_and_clock(&scenario, delete_only_cap, trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let delete_only_cap = ts::take_from_sender(&scenario); + let mut trail = ts::take_shared<AuditTrail<Data>>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + 
clock.set_for_testing(initial_time_for_testing() + 2000); + + trail.delete_record(&delete_only_cap, 0, &clock, ts::ctx(&mut scenario)); + + cleanup_capability_trail_and_clock(&scenario, delete_only_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::main::ERecordTagNotAllowed)] +fun test_delete_records_batch_requires_matching_role_tags() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"finance")], + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"TaggedWriter"), + permission::from_vec(vector[permission::add_record()]), + std::option::some(record_tags::new_role_tags(vector[string::utf8(b"finance")])), + &clock, + ts::ctx(&mut scenario), + ); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"DeleteAllWithoutTags"), + permission::from_vec(vector[permission::delete_all_records()]), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let tagged_writer_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"TaggedWriter"), + &clock, + ts::ctx(&mut scenario), + ); + let delete_all_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"DeleteAllWithoutTags"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(tagged_writer_cap, admin); + transfer::public_transfer(delete_all_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + 
ts::next_tx(&mut scenario, admin); + { + let delete_all_cap = ts::take_from_sender(&scenario); + let tagged_writer_cap = ts::take_from_sender(&scenario); + let mut trail = ts::take_shared>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing() + 1000); + + trail.add_record( + &tagged_writer_cap, + record::new_text(string::utf8(b"Tagged record")), + std::option::none(), + std::option::some(string::utf8(b"finance")), + &clock, + ts::ctx(&mut scenario), + ); + + tagged_writer_cap.destroy_for_testing(); + cleanup_capability_trail_and_clock(&scenario, delete_all_cap, trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let delete_all_cap = ts::take_from_sender(&scenario); + let mut trail = ts::take_shared>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing() + 2000); + + trail.delete_records_batch(&delete_all_cap, 10, &clock, ts::ctx(&mut scenario)); + + cleanup_capability_trail_and_clock(&scenario, delete_all_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::main::ERecordTagNotDefined)] +fun test_add_tagged_record_requires_trail_defined_tag() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"legal")], + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail.create_role( + &admin_cap, + string::utf8(b"TaggedWriter"), + permission::record_admin_permissions(), + std::option::some(record_tags::new_role_tags(vector[string::utf8(b"finance")])), + &clock, + 
ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"TaggedWriter"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Undefined tagged record")), + std::option::none(), + std::option::some(string::utf8(b"finance")), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::main::ERecordTagInUse)] +fun test_remove_record_tag_rejects_in_use_tag() { + let admin = @0xAD; + let writer = @0xB0B; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"finance")], + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail.create_role( + &admin_cap, + string::utf8(b"TaggedWriter"), + permission::record_admin_permissions(), + std::option::some(record_tags::new_role_tags(vector[string::utf8(b"finance")])), + &clock, + ts::ctx(&mut scenario), + ); + + let writer_cap = test_utils::new_capability_for_address( + trail.access_mut(), + &admin_cap, + &string::utf8(b"TaggedWriter"), + writer, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + 
transfer::public_transfer(writer_cap, writer); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::next_tx(&mut scenario, writer); + { + let (writer_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + trail.add_record( + &writer_cap, + record::new_text(string::utf8(b"Tagged")), + std::option::none(), + std::option::some(string::utf8(b"finance")), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(writer_cap, writer); + cleanup_trail_and_clock(trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail.remove_record_tag( + &admin_cap, + string::utf8(b"finance"), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_add_multiple_records() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create RecordAdmin role + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + 
cleanup_trail_and_clock(trail, clock); + }; + + // Add multiple records + ts::next_tx(&mut scenario, admin); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + // Add 3 records + let mut i = 0u64; + while (i < 3) { + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Record")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + i = i + 1; + }; + + // Verify all records exist + assert!(trail.record_count() == 3, 0); + assert!(trail.has_record(0), 1); + assert!(trail.has_record(1), 2); + assert!(trail.has_record(2), 3); + assert!(!trail.has_record(3), 4); + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_add_record_permission_denied() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create role WITHOUT AddRecord permission + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let perms = permission::from_vec(vector[permission::delete_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"NoAddPerm"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let no_add_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"NoAddPerm"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(no_add_cap, admin); + admin_cap.destroy_for_testing(); + 
cleanup_trail_and_clock(trail, clock); + }; + + // Try to add record - should fail + ts::next_tx(&mut scenario, admin); + { + let (no_add_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + // This should fail - no AddRecord permission + trail.add_record( + &no_add_cap, + record::new_text(string::utf8(b"Should fail")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, no_add_cap, trail, clock); + }; + + ts::end(scenario); +} + +// ===== Delete Record Tests ===== + +#[test] +fun test_delete_record_success() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail with initial record + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Initial"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create RecordAdmin role + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Delete record + ts::next_tx(&mut scenario, admin); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + // Verify 
initial state + assert!(trail.record_count() == 1, 0); + assert!(trail.has_record(0), 1); + + // Delete record + trail.delete_record(&record_cap, 0, &clock, ts::ctx(&mut scenario)); + + // Verify record was deleted + assert!(trail.record_count() == 0, 2); // actual count decreases + assert!(trail.sequence_number() == 1, 3); // sequence stays monotonic + assert!(!trail.has_record(0), 4); // record is gone + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_delete_record_permission_denied() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail with initial record + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Initial"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create role WITHOUT DeleteRecord permission + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"NoDeletePerm"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let no_delete_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"NoDeletePerm"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(no_delete_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Try to delete record - should fail + ts::next_tx(&mut scenario, admin); + { + let (no_delete_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + 
clock.set_for_testing(initial_time_for_testing() + 1000); + + // This should fail - no DeleteRecord permission + trail.delete_record(&no_delete_cap, 0, &clock, ts::ctx(&mut scenario)); + + cleanup_capability_trail_and_clock(&scenario, no_delete_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = main::ERecordNotFound)] +fun test_delete_record_not_found() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail (no initial record) + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create RecordAdmin role + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Try to delete non-existent record - should fail + ts::next_tx(&mut scenario, admin); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + // This should fail - record doesn't exist + trail.delete_record(&record_cap, 999, &clock, ts::ctx(&mut scenario)); + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = main::ERecordLocked)] +fun 
test_delete_record_time_locked() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail with time-based locking and initial record + { + let locking_config = locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); // 1 hour + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Locked record"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create RecordAdmin role + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Try to delete locked record - should fail + ts::next_tx(&mut scenario, admin); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + // Time is only 1 second after creation - still within lock window + clock.set_for_testing(initial_time_for_testing() + 1000); // +1 second + + // This should fail - record is time-locked + trail.delete_record(&record_cap, 0, &clock, ts::ctx(&mut scenario)); + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = main::ERecordLocked)] +fun test_delete_record_count_locked() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail with count-based locking and initial record + { + let locking_config = locking::new( + 
locking::window_count_based(5), + timelock::none(), + timelock::none(), + ); // Last 5 records locked + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Locked record"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create RecordAdmin role + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + // Try to delete locked record - should fail + ts::next_tx(&mut scenario, admin); + { + let (record_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + clock.set_for_testing(initial_time_for_testing() + 1000); + + // Only 1 record exists, and last 5 are locked, so it's locked + trail.delete_record(&record_cap, 0, &clock, ts::ctx(&mut scenario)); + + cleanup_capability_trail_and_clock(&scenario, record_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_delete_record_after_time_lock_expires() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(record::new_text(string::utf8(b"Locked record"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) 
= fetch_capability_trail_and_clock(&mut scenario); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(record_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + + clock.set_for_testing(initial_time_for_testing() + 3600 * 1000); + assert!(!trail.is_record_locked(0, &clock), 0); + + clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + ts::next_tx(&mut scenario, admin); + { + let mut trail = ts::take_shared>(&scenario); + let record_cap = ts::take_from_sender(&scenario); + + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing() + 3601 * 1000); + + assert!(trail.has_record(0), 1); + assert!(!trail.is_record_locked(0, &clock), 2); + + trail.delete_record(&record_cap, 0, &clock, ts::ctx(&mut scenario)); + + assert!(!trail.has_record(0), 3); + + clock::destroy_for_testing(clock); + record_cap.destroy_for_testing(); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +// ===== Delete Records Batch Tests ===== + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_delete_records_batch_requires_delete_all_records_permission() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + 
std::option::some(record::new_text(string::utf8(b"Record"))), + ); + transfer::public_transfer(admin_cap, admin); + }; + + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let perms = permission::from_vec(vector[permission::delete_audit_trail()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"TrailDeleteOnly"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let delete_only_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"TrailDeleteOnly"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(delete_only_cap, admin); + admin_cap.destroy_for_testing(); + cleanup_trail_and_clock(trail, clock); + }; + + ts::next_tx(&mut scenario, admin); + { + let delete_only_cap = ts::take_from_sender(&scenario); + let mut trail = ts::take_shared>(&scenario); + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing() + 1000); + + trail.delete_records_batch(&delete_only_cap, 10, &clock, ts::ctx(&mut scenario)); + + clock::destroy_for_testing(clock); + delete_only_cap.destroy_for_testing(); + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +// ===== Query Function Tests ===== + +#[test] +fun test_get_record() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail with initial record + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let initial_data = record::new_bytes(b"Test data"); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::some(initial_data), + ); + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared>(&scenario); + + let record = trail.get_record(0); + let data = audit_trail::record::data(record); + + 
assert!(record::bytes(data) == option::some(b"Test data"), 0); + + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = main::ERecordNotFound)] +fun test_get_record_not_found() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail (no initial record) + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared>(&scenario); + + // This should fail - no records exist + let _record = trail.get_record(0); + + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_first_last_sequence() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail + { + let locking_config = locking::new( + locking::window_none(), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin); + }; + + // Create RecordAdmin and test sequence functions + ts::next_tx(&mut scenario, admin); + { + let (admin_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Empty trail + assert!(trail.first_sequence().is_none(), 0); + assert!(trail.last_sequence().is_none(), 1); + + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RecordAdmin"), + permission::record_admin_permissions(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let record_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"RecordAdmin"), + &clock, + ts::ctx(&mut scenario), + ); + + clock.set_for_testing(initial_time_for_testing() + 1000); + + // Add first record + trail.add_record( + 
&record_cap, + record::new_text(string::utf8(b"First")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + assert!(trail.first_sequence() == std::option::some(0), 2); + assert!(trail.last_sequence() == std::option::some(0), 3); + + // Add second record + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Second")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + assert!(trail.first_sequence() == std::option::some(0), 4); + assert!(trail.last_sequence() == std::option::some(1), 5); + + // Add third record + trail.add_record( + &record_cap, + record::new_text(string::utf8(b"Third")), + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + assert!(trail.first_sequence() == std::option::some(0), 6); + assert!(trail.last_sequence() == std::option::some(2), 7); + + record_cap.destroy_for_testing(); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = main::ERecordNotFound)] +fun test_is_record_locked_not_found() { + let admin = @0xAD; + let mut scenario = ts::begin(admin); + + // Setup trail (no initial record) + { + let locking_config = locking::new( + locking::window_time_based(3600), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, admin); + { + let trail = ts::take_shared>(&scenario); + + let mut clock = clock::create_for_testing(ts::ctx(&mut scenario)); + clock.set_for_testing(initial_time_for_testing() + 1000); + + // This should fail - record doesn't exist + let _locked = trail.is_record_locked(0, &clock); + + clock::destroy_for_testing(clock); + ts::return_shared(trail); + }; + + ts::end(scenario); +} diff --git a/audit-trail-move/tests/role_tests.move 
b/audit-trail-move/tests/role_tests.move new file mode 100644 index 00000000..a674988a --- /dev/null +++ b/audit-trail-move/tests/role_tests.move @@ -0,0 +1,820 @@ +#[allow(lint(abort_without_constant))] +#[test_only] +module audit_trail::role_tests; + +use audit_trail::{ + locking, + main::{initial_admin_role_name, AuditTrail}, + permission, + record::{Self, Data}, + record_tags, + test_utils::{ + Self, + setup_test_audit_trail, + setup_test_audit_trail_with_tags, + fetch_capability_trail_and_clock, + cleanup_capability_trail_and_clock + } +}; +use iota::test_scenario as ts; +use std::string; +use tf_components::timelock; + +#[test] +fun test_role_based_permission_delegation() { + let admin_user = @0xAD; + let role_admin_user = @0xB0B; + let cap_admin_user = @0xCAB; + let record_admin_user = @0xDED; + + let mut scenario = ts::begin(admin_user); + + // Step 1: admin_user creates the audit trail + let trail_id = { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + + let (admin_cap, trail_id) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + + // Verify admin capability was created with correct role and trail reference + assert!(admin_cap.role() == initial_admin_role_name(), 0); + assert!(admin_cap.target_key() == trail_id, 1); + + // Transfer the admin capability to the user + transfer::public_transfer(admin_cap, admin_user); + + trail_id + }; + + // Step 2: Admin creates RoleAdmin and CapAdmin roles + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Verify initial state - should only have the initial admin role + assert!(trail.access().size() == 1, 2); + + // Create RoleAdmin role + let role_admin_perms = permission::role_admin_permissions(); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RoleAdmin"), + role_admin_perms, + std::option::none(), + 
&clock,
+                ts::ctx(&mut scenario),
+            );
+
+        // Create CapAdmin role
+        let cap_admin_perms = permission::cap_admin_permissions();
+        trail
+            .access_mut()
+            .create_role(
+                &admin_cap,
+                string::utf8(b"CapAdmin"),
+                cap_admin_perms,
+                std::option::none(),
+                &clock,
+                ts::ctx(&mut scenario),
+            );
+
+        // Verify both roles were created
+        assert!(trail.access().size() == 3, 3); // Initial admin + RoleAdmin + CapAdmin
+        assert!(trail.access().has_role(&string::utf8(b"RoleAdmin")), 4);
+        assert!(trail.access().has_role(&string::utf8(b"CapAdmin")), 5);
+
+        cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock);
+    };
+
+    // Step 3: Admin creates capability for RoleAdmin and CapAdmin and transfers to the respective users
+    ts::next_tx(&mut scenario, admin_user);
+    {
+        let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario);
+
+        let role_admin_cap = test_utils::new_capability_without_restrictions(
+            trail.access_mut(),
+            &admin_cap,
+            &string::utf8(b"RoleAdmin"),
+            &clock,
+            ts::ctx(&mut scenario),
+        );
+
+        // Verify the capability was created with correct role and trail ID
+        assert!(role_admin_cap.role() == string::utf8(b"RoleAdmin"), 6);
+        assert!(role_admin_cap.target_key() == trail_id, 7);
+
+        iota::transfer::public_transfer(role_admin_cap, role_admin_user);
+
+        let cap_admin_cap = test_utils::new_capability_without_restrictions(
+            trail.access_mut(),
+            &admin_cap,
+            &string::utf8(b"CapAdmin"),
+            &clock,
+            ts::ctx(&mut scenario),
+        );
+
+        // Verify the capability was created with correct role and trail ID
+        assert!(cap_admin_cap.role() == string::utf8(b"CapAdmin"), 8);
+        assert!(cap_admin_cap.target_key() == trail_id, 9);
+
+        iota::transfer::public_transfer(cap_admin_cap, cap_admin_user);
+
+        cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock);
+    };
+
+    // Step 4: RoleAdmin creates RecordAdmin role (demonstrating delegated role management)
+    ts::next_tx(&mut scenario, role_admin_user);
+    {
+        let (role_admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario);
+
+        // Verify RoleAdmin has the correct role
+        assert!(role_admin_cap.role() == string::utf8(b"RoleAdmin"), 10);
+
+        let record_admin_perms = permission::record_admin_permissions();
+        trail
+            .access_mut()
+            .create_role(
+                &role_admin_cap,
+                string::utf8(b"RecordAdmin"),
+                record_admin_perms,
+                std::option::none(),
+                &clock,
+                ts::ctx(&mut scenario),
+            );
+
+        // Verify RecordAdmin role was created successfully
+        assert!(trail.access().size() == 4, 11); // Initial admin + RoleAdmin + CapAdmin + RecordAdmin
+        assert!(trail.access().has_role(&string::utf8(b"RecordAdmin")), 12);
+
+        cleanup_capability_trail_and_clock(&scenario, role_admin_cap, trail, clock);
+    };
+
+    // Step 5: CapAdmin creates capability for RecordAdmin and transfers to record_admin_user
+    ts::next_tx(&mut scenario, cap_admin_user);
+    {
+        let (cap_admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario);
+
+        // Verify CapAdmin has the correct role
+        assert!(cap_admin_cap.role() == string::utf8(b"CapAdmin"), 13);
+
+        let record_admin_cap = test_utils::new_capability_without_restrictions(
+            trail.access_mut(),
+            &cap_admin_cap,
+            &string::utf8(b"RecordAdmin"),
+            &clock,
+            ts::ctx(&mut scenario),
+        );
+
+        // Verify the capability was created with correct role and trail ID
+        assert!(record_admin_cap.role() == string::utf8(b"RecordAdmin"), 14);
+        assert!(record_admin_cap.target_key() == trail_id, 15);
+
+        iota::transfer::public_transfer(record_admin_cap, record_admin_user);
+
+        cleanup_capability_trail_and_clock(&scenario, cap_admin_cap, trail, clock);
+    };
+
+    // Step 6: RecordAdmin adds a new record to the audit trail (demonstrating delegated record management)
+    ts::next_tx(&mut scenario, record_admin_user);
+    {
+        let (record_admin_cap, mut trail, mut clock) = fetch_capability_trail_and_clock(
+            &mut scenario,
+        );
+        clock.set_for_testing(test_utils::initial_time_for_testing() + 1000);
+
+        // Verify RecordAdmin
has the correct role + assert!(record_admin_cap.role() == string::utf8(b"RecordAdmin"), 16); + + // Verify initial record count + let initial_record_count = trail.records().length(); + + let test_data = record::new_text(string::utf8(b"Test record added by RecordAdmin")); + + trail.add_record( + &record_admin_cap, + test_data, + std::option::none(), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify the record was added successfully + assert!(trail.records().length() == initial_record_count + 1, 17); + + cleanup_capability_trail_and_clock(&scenario, record_admin_cap, trail, clock); + }; + + // Cleanup + ts::next_tx(&mut scenario, admin_user); + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::main::ERecordTagNotDefined)] +fun test_create_role_rejects_undefined_record_tags() { + let admin_user = @0xAD; + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"legal")], + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + let perms = permission::from_vec(vector[permission::add_record()]); + + trail.create_role( + &admin_cap, + string::utf8(b"TaggedRole"), + perms, + std::option::some(record_tags::new_role_tags(vector[string::utf8(b"finance")])), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +fun test_delete_role_success() { + let admin_user = @0xAD; + + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + 
let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Verify initial state - only Admin role exists + assert!(trail.access().size() == 1, 0); + + // Create a role to delete + let perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RoleToDelete"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify the role was created + assert!(trail.access().size() == 2, 1); + assert!(trail.access().has_role(&string::utf8(b"RoleToDelete")), 2); + + // Delete the role + trail + .access_mut() + .delete_role( + &admin_cap, + &string::utf8(b"RoleToDelete"), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify the role was deleted + assert!(trail.access().size() == 1, 3); + assert!(!trail.access().has_role(&string::utf8(b"RoleToDelete")), 4); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::main::ERecordTagInUse)] +fun test_remove_record_tag_rejects_role_only_usage() { + let admin_user = @0xAD; + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"finance")], + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + let perms = permission::from_vec(vector[permission::add_record()]); + + trail.create_role( + &admin_cap, + 
string::utf8(b"TaggedRole"), + perms, + std::option::some(record_tags::new_role_tags(vector[string::utf8(b"finance")])), + &clock, + ts::ctx(&mut scenario), + ); + + trail.remove_record_tag( + &admin_cap, + string::utf8(b"finance"), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +// ===== Error Case Tests ===== + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_create_role_permission_denied() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + // Setup + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + // Create role without RolesAdd permission + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Create role WITHOUT add_roles permission + let perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"NoRolesPerm"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let user_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"NoRolesPerm"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(user_cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // User tries to create a role - should fail + ts::next_tx(&mut scenario, user); + { + let (user_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let perms = permission::from_vec(vector[permission::add_record()]); + + // This should fail - no add_roles 
permission + trail + .access_mut() + .create_role( + &user_cap, + string::utf8(b"NewRole"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, user_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_delete_role_permission_denied() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + // Setup + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + // Create roles + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Create a role to delete + let perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RoleToDelete"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Create role WITHOUT delete_roles permission + let no_delete_perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"NoDeleteRolePerm"), + no_delete_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let user_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"NoDeleteRolePerm"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(user_cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // User tries to delete a role - should fail + ts::next_tx(&mut scenario, user); + { + let (user_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut 
scenario); + + // This should fail - no delete_roles permission + trail + .access_mut() + .delete_role(&user_cap, &string::utf8(b"RoleToDelete"), &clock, ts::ctx(&mut scenario)); + + cleanup_capability_trail_and_clock(&scenario, user_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ECapabilityPermissionDenied)] +fun test_update_role_permissions_permission_denied() { + let admin_user = @0xAD; + let user = @0xB0B; + + let mut scenario = ts::begin(admin_user); + + // Setup + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + // Create roles + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Create a role to update + let perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"RoleToUpdate"), + perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Create role WITHOUT update_roles permission + let no_update_perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"NoUpdateRolePerm"), + no_update_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + let user_cap = test_utils::new_capability_without_restrictions( + trail.access_mut(), + &admin_cap, + &string::utf8(b"NoUpdateRolePerm"), + &clock, + ts::ctx(&mut scenario), + ); + + transfer::public_transfer(user_cap, user); + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + // User tries to update a role - should fail + ts::next_tx(&mut scenario, user); + { + let (user_cap, mut trail, clock) = 
fetch_capability_trail_and_clock(&mut scenario); + + let new_perms = permission::from_vec(vector[permission::delete_record()]); + + // This should fail - no update_roles permission + trail + .access_mut() + .update_role( + &user_cap, + &string::utf8(b"RoleToUpdate"), + new_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, user_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ERoleDoesNotExist)] +fun test_get_role_permissions_nonexistent() { + let admin_user = @0xAD; + + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + admin_cap.destroy_for_testing(); + }; + + ts::next_tx(&mut scenario, admin_user); + { + let trail = ts::take_shared>(&scenario); + + // This should fail - role doesn't exist + let _perms = trail.access().get_role_permissions(&string::utf8(b"NonExistentRole")); + + ts::return_shared(trail); + }; + + ts::end(scenario); +} + +#[test] +fun test_update_role_permissions_success() { + let admin_user = @0xAD; + + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + // Create a role with add_record permission + let initial_perms = permission::from_vec(vector[permission::add_record()]); + trail + .access_mut() + .create_role( + &admin_cap, + string::utf8(b"TestRole"), + initial_perms, + 
std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify the role was created with add_record permission + let perms = trail.access().get_role_permissions(&string::utf8(b"TestRole")); + assert!(perms.contains(&permission::add_record()), 0); + assert!(!perms.contains(&permission::delete_record()), 1); + + // Update the role to have delete_record permission instead + let new_perms = permission::from_vec(vector[permission::delete_record()]); + trail + .access_mut() + .update_role( + &admin_cap, + &string::utf8(b"TestRole"), + new_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + // Verify the permissions were updated + let updated_perms = trail.access().get_role_permissions(&string::utf8(b"TestRole")); + assert!(!updated_perms.contains(&permission::add_record()), 2); + assert!(updated_perms.contains(&permission::delete_record()), 3); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::main::ERecordTagNotDefined)] +fun test_update_role_permissions_rejects_undefined_record_tags() { + let admin_user = @0xAD; + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail_with_tags( + &mut scenario, + locking_config, + std::option::none(), + vector[string::utf8(b"legal")], + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + trail.create_role( + &admin_cap, + string::utf8(b"TestRole"), + permission::from_vec(vector[permission::add_record()]), + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + trail.update_role_permissions( + &admin_cap, + string::utf8(b"TestRole"), + 
permission::from_vec(vector[permission::add_record()]), + std::option::some(record_tags::new_role_tags(vector[string::utf8(b"finance")])), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} + +#[test] +#[expected_failure(abort_code = audit_trail::role_map::ERoleDoesNotExist)] +fun test_update_role_permissions_nonexistent() { + let admin_user = @0xAD; + + let mut scenario = ts::begin(admin_user); + + { + let locking_config = locking::new( + locking::window_count_based(0), + timelock::none(), + timelock::none(), + ); + let (admin_cap, _) = setup_test_audit_trail( + &mut scenario, + locking_config, + std::option::none(), + ); + transfer::public_transfer(admin_cap, admin_user); + }; + + ts::next_tx(&mut scenario, admin_user); + { + let (admin_cap, mut trail, clock) = fetch_capability_trail_and_clock(&mut scenario); + + let new_perms = permission::from_vec(vector[permission::add_record()]); + + // This should fail - role doesn't exist + trail + .access_mut() + .update_role( + &admin_cap, + &string::utf8(b"NonExistentRole"), + new_perms, + std::option::none(), + &clock, + ts::ctx(&mut scenario), + ); + + cleanup_capability_trail_and_clock(&scenario, admin_cap, trail, clock); + }; + + ts::end(scenario); +} diff --git a/audit-trail-move/tests/test_utils.move b/audit-trail-move/tests/test_utils.move new file mode 100644 index 00000000..9c2f4d72 --- /dev/null +++ b/audit-trail-move/tests/test_utils.move @@ -0,0 +1,233 @@ +#[test_only] +module audit_trail::test_utils; + +use audit_trail::{locking, main::{Self, AuditTrail}, record::{Self, Data}}; +use iota::{clock::{Self, Clock}, test_scenario::{Self as ts, Scenario}}; +use std::string; +use tf_components::{capability::Capability, role_map::RoleMap}; + +const INITIAL_TIME_FOR_TESTING: u64 = 1234567; + +/// Test data type for audit trail records +public struct TestData has copy, drop, store { + value: u64, + message: vector, +} + 
+public(package) fun new_test_data(value: u64, message: vector): TestData { + TestData { + value, + message, + } +} + +public(package) fun test_data_value(data: &TestData): u64 { + data.value +} + +public(package) fun test_data_message(data: &TestData): vector { + data.message +} + +public(package) fun initial_time_for_testing(): u64 { + INITIAL_TIME_FOR_TESTING +} + +/// Setup a test audit trail with optional initial data +public(package) fun setup_test_audit_trail( + scenario: &mut Scenario, + locking_config: locking::LockingConfig, + initial_data: Option, +): (Capability, iota::object::ID) { + setup_test_audit_trail_impl(scenario, locking_config, initial_data, vector[]) +} + +/// Setup a test audit trail with optional initial data and available record tags. +public(package) fun setup_test_audit_trail_with_tags( + scenario: &mut Scenario, + locking_config: locking::LockingConfig, + initial_data: Option, + available_record_tags: vector, +): (Capability, iota::object::ID) { + setup_test_audit_trail_impl(scenario, locking_config, initial_data, available_record_tags) +} + +/// Setup a test audit trail backed by the `TestData` helper type. +public(package) fun setup_test_data_audit_trail( + scenario: &mut Scenario, + locking_config: locking::LockingConfig, + initial_data: Option, +): (Capability, iota::object::ID) { + setup_test_audit_trail_impl(scenario, locking_config, initial_data, vector[]) +} + +/// Setup a test audit trail backed by `TestData` with available record tags. 
+public(package) fun setup_test_data_audit_trail_with_tags( + scenario: &mut Scenario, + locking_config: locking::LockingConfig, + initial_data: Option, + available_record_tags: vector, +): (Capability, iota::object::ID) { + setup_test_audit_trail_impl( + scenario, + locking_config, + initial_data, + available_record_tags, + ) +} + +fun setup_test_audit_trail_impl( + scenario: &mut Scenario, + locking_config: locking::LockingConfig, + initial_data: Option, + available_record_tags: vector, +): (Capability, iota::object::ID) { + let (admin_cap, trail_id) = { + let mut clock = clock::create_for_testing(ts::ctx(scenario)); + clock.set_for_testing(INITIAL_TIME_FOR_TESTING); + + let trail_metadata = main::new_trail_metadata( + string::utf8(b"Setup Test Trail"), + option::some(string::utf8(b"Setup Test Trail Description")), + ); + + let initial_record = if (initial_data.is_some()) { + option::some( + record::new_initial_record( + initial_data.destroy_some(), + option::none(), + option::none(), + ), + ) + } else { + initial_data.destroy_none(); + option::none() + }; + + let (admin_cap, trail_id) = main::create( + initial_record, + locking_config, + option::some(trail_metadata), + option::none(), + available_record_tags, + &clock, + ts::ctx(scenario), + ); + + clock::destroy_for_testing(clock); + (admin_cap, trail_id) + }; + + (admin_cap, trail_id) +} + +/// Create a new unrestricted capability with a specific role without any +/// address, valid_from, or valid_until restrictions. +/// +/// Returns the newly created capability. +/// +/// Sends a CapabilityIssued event upon successful creation. +/// +/// Errors: +/// - Aborts with EPermissionDenied if the provided capability does not have the permission specified with `CapabilityAdminPermissions::add`. +/// - Aborts with ERoleDoesNotExist if the specified role does not exist in the role_map. 
+public fun new_capability_without_restrictions( + role_map: &mut RoleMap, + cap: &Capability, + role: &string::String, + clock: &Clock, + ctx: &mut TxContext, +): Capability { + role_map.new_capability( + cap, + role, + std::option::none(), + std::option::none(), + std::option::none(), + clock, + ctx, + ) +} + +/// Create a new capability with a specific role that expires at a given timestamp (milliseconds since Unix epoch). +/// +/// Returns the newly created capability. +/// +/// Sends a CapabilityIssued event upon successful creation. +/// +/// Errors: +/// - Aborts with EPermissionDenied if the provided capability does not have the permission specified with `CapabilityAdminPermissions::add`. +/// - Aborts with ERoleDoesNotExist if the specified role does not exist in the role_map. +public(package) fun new_capability_valid_until( + role_map: &mut RoleMap, + cap: &Capability, + role: &string::String, + valid_until: u64, + clock: &Clock, + ctx: &mut TxContext, +): Capability { + role_map.new_capability( + cap, + role, + std::option::none(), + std::option::none(), + std::option::some(valid_until), + clock, + ctx, + ) +} + +/// Create a new capability with a specific role restricted to an address. +/// Optionally set an expiration time (milliseconds since Unix epoch). +/// +/// Returns the newly created capability. +/// +/// Sends a CapabilityIssued event upon successful creation. +/// +/// Errors: +/// - Aborts with EPermissionDenied if the provided capability does not have the permission specified with `CapabilityAdminPermissions::add`. +/// - Aborts with ERoleDoesNotExist if the specified role does not exist in the role_map. 
+public fun new_capability_for_address( + role_map: &mut RoleMap, + cap: &Capability, + role: &string::String, + issued_to: address, + valid_until: Option, + clock: &Clock, + ctx: &mut TxContext, +): Capability { + role_map.new_capability( + cap, + role, + std::option::some(issued_to), + std::option::none(), + valid_until, + clock, + ctx, + ) +} + +public(package) fun fetch_capability_trail_and_clock( + scenario: &mut Scenario, +): (Capability, AuditTrail, Clock) { + let admin_cap = ts::take_from_sender(scenario); + let trail = ts::take_shared>(scenario); + let clock = iota::clock::create_for_testing(ts::ctx(scenario)); + (admin_cap, trail, clock) +} + +public(package) fun cleanup_capability_trail_and_clock( + scenario: &Scenario, + cap: Capability, + trail: AuditTrail, + clock: Clock, +) { + iota::clock::destroy_for_testing(clock); + ts::return_to_sender(scenario, cap); + ts::return_shared(trail); +} + +public(package) fun cleanup_trail_and_clock(trail: AuditTrail, clock: Clock) { + iota::clock::destroy_for_testing(clock); + ts::return_shared(trail); +} diff --git a/audit-trail-rs/Cargo.toml b/audit-trail-rs/Cargo.toml new file mode 100644 index 00000000..648036ff --- /dev/null +++ b/audit-trail-rs/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "audit_trail" +version = "0.1.0-alpha" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +keywords = ["iota", "tangle", "utxo", "audit-trail"] +license.workspace = true +readme = "./README.md" +repository.workspace = true +rust-version.workspace = true +description = "An audit trail toolkit for the IOTA Ledger."
+ +[dependencies] +anyhow.workspace = true +async-trait.workspace = true +bcs.workspace = true +iota-caip = { git = "https://github.com/iotaledger/iota-caip.git", default-features = false, features = ["iota"], optional = true } +iota_interaction = { workspace = true, default-features = false } +product_common = { workspace = true, default-features = false, features = ["transaction"] } +secret-storage = { workspace = true, default-features = false } +serde.workspace = true +serde-aux = { workspace = true, default-features = false } +serde_json.workspace = true +strum.workspace = true +thiserror.workspace = true + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +iota_interaction_rust = { workspace = true, default-features = false } +iota-sdk = { workspace = true } +tokio = { workspace = true } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +iota_interaction_ts.workspace = true +js-sys = "0.3" +product_common = { workspace = true, default-features = false, features = ["bindings"] } +tokio = { version = "1.46.1", default-features = false, features = ["sync"] } + +[dev-dependencies] +async-trait.workspace = true +iota_interaction = { workspace = true } +product_common = { workspace = true, features = ["transaction", "test-utils"] } + +[build-dependencies] +product_common = { workspace = true, features = ["move-history-manager"] } + +[features] +default = ["send-sync"] +send-sync = [ + "send-sync-storage", + "product_common/send-sync", + "iota_interaction/send-sync-transaction", +] +# Enables `Send` + `Sync` bounds for the storage traits. +send-sync-storage = ["secret-storage/send-sync-storage"] +# Enables an high-level integration with IOTA gas-station. +gas-station = ["product_common/gas-station"] +# Uses a default HTTP Client instead of a user-provided one. +default-http-client = ["product_common/default-http-client"] +# Enables the interaction with IOTA Resource Locators. 
+irl = ["dep:iota-caip"] diff --git a/audit-trail-rs/README.md b/audit-trail-rs/README.md new file mode 100644 index 00000000..f02ca754 --- /dev/null +++ b/audit-trail-rs/README.md @@ -0,0 +1,61 @@ +![banner](https://github.com/iotaledger/notarization/raw/HEAD/.github/banner_notarization.png) + +

+ StackExchange + Discord + Apache 2.0 license +

+ +

+ Introduction ◈ + Documentation & Resources ◈ + Bindings ◈ + Contributing +

+ +--- + +# IOTA Audit Trail Rust SDK + +## Introduction + +`audit_trail` is the Rust SDK for reading and writing audit trails on the IOTA ledger. + +An audit trail is a shared on-chain object that stores a sequential series of records together with: + +- role-based access control backed by capabilities +- trail-level locking rules for writes and deletions +- tag registries for record categorization +- immutable creation metadata and optional updatable metadata + +The crate provides: + +- read-only and signing client wrappers for the on-chain audit-trail package +- typed trail handles for records, locking, access control, and tags +- serializable Rust representations of on-chain objects and emitted events +- transaction builders that integrate with the shared `product_common` transaction flow + +## Documentation And Resources + +- [Audit Trail Move Package](https://github.com/iotaledger/notarization/tree/main/audit-trail-move): On-chain contract package that defines the shared object model, permissions, locking, and events. +- [Wasm SDK](https://github.com/iotaledger/notarization/tree/main/bindings/wasm/audit_trail_wasm): JavaScript and TypeScript bindings for browser and Node.js integrations. +- [Wasm Examples](https://github.com/iotaledger/notarization/tree/main/bindings/wasm/audit_trail_wasm/examples/README.md): Runnable audit-trail examples for JS and TS consumers. +- [Repository Examples](https://github.com/iotaledger/notarization/tree/main/examples/README.md): End-to-end examples across the broader repository. + +This README is also used as the crate-level rustdoc entry point, while the source files provide detailed API documentation for all public types and methods. 
+ +## Bindings + +[Foreign Function Interface (FFI)](https://en.wikipedia.org/wiki/Foreign_function_interface) bindings of this Rust SDK to other programming languages: + +- [Web Assembly](https://github.com/iotaledger/notarization/tree/main/bindings/wasm/audit_trail_wasm) (JavaScript/TypeScript) + +## Contributing + +We would love to have you help us with the development of IOTA Audit Trail. Each and every contribution is greatly valued. + +Please review the [contribution](https://docs.iota.org/developer/iota-notarization/contribute) sections in the [IOTA Docs Portal](https://docs.iota.org/developer/iota-notarization/). + +To contribute directly to the repository, simply fork the project, push your changes to your fork and create a pull request to get them included. + +The best place to get involved in discussions about this library or to look for support is the `#notarization` channel on the [IOTA Discord](https://discord.gg/iota-builders). You can also ask questions on our [Stack Exchange](https://iota.stackexchange.com/). diff --git a/audit-trail-rs/build.rs b/audit-trail-rs/build.rs new file mode 100644 index 00000000..3486b438 --- /dev/null +++ b/audit-trail-rs/build.rs @@ -0,0 +1,29 @@ +// Copyright 2020-2025 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::path::PathBuf; + +use product_common::move_history_manager::MoveHistoryManager; + +fn main() { + let move_lock_path = "../audit-trail-move/Move.lock"; + println!("[build.rs] move_lock_path: {move_lock_path}"); + let move_history_path = "../audit-trail-move/Move.history.json"; + println!("[build.rs] move_history_path: {move_history_path}"); + + MoveHistoryManager::new( + &PathBuf::from(move_lock_path), + &PathBuf::from(move_history_path), + // We will watch the default watch list (`get_default_aliases_to_watch()`) in this build script + // so we leave the `additional_aliases_to_watch` argument vec empty.
+ // Use for example `vec!["localnet".to_string()]` instead, if you don't want to ignore `localnet`. + vec![], + ) + .manage_history_file(|message| { + println!("[build.rs] {}", message); + }) + .expect("Successfully managed Move history file"); + + // Tell Cargo to rerun this build script if the Move.lock file changes. + println!("cargo::rerun-if-changed={move_lock_path}"); +} diff --git a/audit-trail-rs/src/client/full_client.rs b/audit-trail-rs/src/client/full_client.rs new file mode 100644 index 00000000..3c77523c --- /dev/null +++ b/audit-trail-rs/src/client/full_client.rs @@ -0,0 +1,343 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! # Audit Trail Client +//! +//! The full client extends [`AuditTrailClientReadOnly`] with signing support and write +//! transaction builders. +//! +//! ## Transaction Flow +//! +//! Write APIs return a [`TransactionBuilder`](product_common::transaction::transaction_builder::TransactionBuilder) +//! that you can configure before signing and submitting: +//! +//! ```rust,no_run +//! # use audit_trail::AuditTrailClient; +//! # use audit_trail::core::types::Data; +//! # async fn example( +//! # client: &AuditTrailClient< +//! # impl secret_storage::Signer + iota_interaction::OptionalSync, +//! # >, +//! # ) -> Result<(), Box> { +//! let created = client +//! .create_trail() +//! .with_initial_record_parts(Data::text("Initial record"), None, None) +//! .finish() +//! .with_gas_budget(1_000_000) +//! .build_and_execute(client) +//! .await?; +//! +//! let trail_id = created.output.trail_id; +//! +//! client +//! .trail(trail_id) +//! .records() +//! .add(Data::text("Follow-up record"), None, None) +//! .build_and_execute(client) +//! .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Example Workflow +//! +//! ```rust,no_run +//! # use audit_trail::AuditTrailClient; +//! # use audit_trail::core::types::{Data, PermissionSet, RoleTags}; +//! # async fn example( +//! 
# client: &AuditTrailClient< +//! # impl secret_storage::Signer + iota_interaction::OptionalSync, +//! # >, +//! # ) -> Result<(), Box> { +//! let created = client +//! .create_trail() +//! .with_initial_record_parts(Data::text("Initial record"), None, None) +//! .with_record_tags(["finance"]) +//! .finish() +//! .build_and_execute(client) +//! .await?; +//! +//! let trail_id = created.output.trail_id; +//! +//! client +//! .trail(trail_id) +//! .access() +//! .for_role("TaggedWriter") +//! .create(PermissionSet::record_admin_permissions(), Some(RoleTags::new(["finance"]))) +//! .build_and_execute(client) +//! .await?; +//! +//! client +//! .trail(trail_id) +//! .records() +//! .add(Data::text("Budget approved"), None, Some("finance".to_string())) +//! .build_and_execute(client) +//! .await?; +//! # Ok(()) +//! # } +//! ``` + +use std::ops::Deref; + +use async_trait::async_trait; +#[cfg(not(target_arch = "wasm32"))] +use iota_interaction::IotaClient; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::crypto::PublicKey; +use iota_interaction::types::transaction::ProgrammableTransaction; +use iota_interaction::{IotaKeySignature, OptionalSync}; +#[cfg(target_arch = "wasm32")] +use iota_interaction_ts::bindings::WasmIotaClient as IotaClient; +use product_common::core_client::{CoreClient, CoreClientReadOnly}; +use product_common::network_name::NetworkName; +use secret_storage::Signer; +use serde::de::DeserializeOwned; + +use crate::client::read_only::{AuditTrailClientReadOnly, PackageOverrides}; +use crate::core::builder::AuditTrailBuilder; +use crate::core::trail::{AuditTrailFull, AuditTrailHandle, AuditTrailReadOnly}; +use crate::error::Error; +use crate::iota_interaction_adapter::IotaClientAdapter; + +/// A marker type indicating the absence of a signer. 
+#[derive(Debug, Clone, Copy)] +#[non_exhaustive] +pub struct NoSigner; + +/// The error that results from a failed attempt at creating an [`AuditTrailClient`] +/// from a given [IotaClient]. +#[derive(Debug, thiserror::Error)] +#[error("failed to create an 'AuditTrailClient' from the given 'IotaClient'")] +#[non_exhaustive] +pub struct FromIotaClientError { + /// Type of failure for this error. + #[source] + pub kind: FromIotaClientErrorKind, +} + +/// Categories of failure for [`FromIotaClientError`]. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum FromIotaClientErrorKind { + /// A package ID is required, but was not supplied. + #[error("an audit-trail package ID must be supplied when connecting to an unofficial IOTA network")] + MissingPackageId, + /// Network ID resolution through an RPC call failed. + #[error("failed to resolve the network the given client is connected to")] + NetworkResolution(#[source] Box), +} + +/// A client for creating and managing audit trails on the IOTA blockchain. +/// +/// This client combines read-only capabilities with transaction signing, +/// enabling full interaction with audit trails. +/// +/// ## Type Parameter +/// +/// - `S`: The signer type that implements [`Signer`] +#[derive(Clone)] +pub struct AuditTrailClient { + /// The underlying read-only client used for executing read-only operations. + pub(super) read_client: AuditTrailClientReadOnly, + /// The public key associated with the signer, if any. + pub(super) public_key: Option, + /// The signer used for signing transactions, or `NoSigner` if the client is read-only. + pub(super) signer: S, +} + +impl Deref for AuditTrailClient { + type Target = AuditTrailClientReadOnly; + fn deref(&self) -> &Self::Target { + &self.read_client + } +} + +impl AuditTrailClient { + /// Creates a new client with no signing capabilities from an IOTA client. 
+ /// + /// # Warning + /// + /// Passing `package_overrides` is only needed when connecting to a custom IOTA network or + /// when testing against explicitly deployed package pairs. + /// + /// Relying on a custom audit-trail package while connected to an official IOTA network is + /// strongly discouraged and can lead to compatibility problems with other official IOTA Trust + /// Framework products. + /// + /// # Examples + /// ```rust,ignore + /// # use audit_trail::client::AuditTrailClient; + /// + /// # #[tokio::main] + /// # async fn main() -> anyhow::Result<()> { + /// let iota_client = iota_sdk::IotaClientBuilder::default() + /// .build_testnet() + /// .await?; + /// // No package ID is required since we are connecting to an official IOTA network. + /// let audit_trail_client = AuditTrailClient::from_iota_client(iota_client, None).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn from_iota_client( + iota_client: IotaClient, + package_overrides: impl Into>, + ) -> Result { + let read_only_client = if let Some(package_overrides) = package_overrides.into() { + AuditTrailClientReadOnly::new_with_package_overrides(iota_client, package_overrides).await + } else { + AuditTrailClientReadOnly::new(iota_client).await + } + .map_err(|e| match e { + Error::InvalidConfig(_) => FromIotaClientErrorKind::MissingPackageId, + Error::RpcError(msg) => FromIotaClientErrorKind::NetworkResolution(msg.into()), + _ => unreachable!( + "'AuditTrailClientReadOnly::new' has been changed without updating error handling in 'AuditTrailClient::from_iota_client'" + ), + }) + .map_err(|kind| FromIotaClientError { kind })?; + + Ok(Self { + read_client: read_only_client, + public_key: None, + signer: NoSigner, + }) + } +} + +impl AuditTrailClient { + /// Creates a signing client from an existing read-only client and signer. + /// + /// # Errors + /// + /// Returns an error if the signer public key cannot be loaded. 
+ pub async fn new(client: AuditTrailClientReadOnly, signer: S) -> Result + where + S: Signer, + { + let public_key = signer + .public_key() + .await + .map_err(|e| Error::InvalidKey(e.to_string()))?; + + Ok(AuditTrailClient { + read_client: client, + public_key: Some(public_key), + signer, + }) + } + + /// Replaces the signer used by this client. + /// + /// # Errors + /// + /// Returns an error if the replacement signer public key cannot be loaded. + pub async fn with_signer(self, signer: NewS) -> Result, secret_storage::Error> + where + NewS: Signer, + { + let public_key = signer.public_key().await?; + + Ok(AuditTrailClient { + read_client: self.read_client, + public_key: Some(public_key), + signer, + }) + } + /// Returns the underlying read-only client view. + pub fn read_only(&self) -> &AuditTrailClientReadOnly { + &self.read_client + } + + /// Returns a typed handle bound to a specific trail object ID. + pub fn trail<'a>(&'a self, trail_id: ObjectID) -> AuditTrailHandle<'a, Self> { + AuditTrailHandle::new(self, trail_id) + } + + /// Returns the TfComponents package ID used by this client. + pub fn tf_components_package_id(&self) -> ObjectID { + self.read_client.tf_components_package_id() + } + + /// Creates a builder for a new audit trail. + /// + /// When the client has a signer, the builder is pre-populated with that signer's address as + /// the initial admin. + pub fn create_trail(&self) -> AuditTrailBuilder { + AuditTrailBuilder { + admin: self.public_key.as_ref().map(IotaAddress::from), + ..AuditTrailBuilder::default() + } + } +} + +impl AuditTrailClient +where + S: Signer, +{ + /// Returns a reference to the [PublicKey] wrapped by this client. + pub fn public_key(&self) -> &PublicKey { + self.public_key.as_ref().expect("public_key is set") + } + + /// Returns the [IotaAddress] wrapped by this client. 
+ #[inline(always)] + pub fn address(&self) -> IotaAddress { + IotaAddress::from(self.public_key()) + } +} + +#[cfg_attr(feature = "send-sync", async_trait)] +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +impl CoreClientReadOnly for AuditTrailClient { + fn package_id(&self) -> ObjectID { + self.read_client.package_id() + } + + fn tf_components_package_id(&self) -> Option { + Some(self.read_client.tf_components_package_id()) + } + + fn network_name(&self) -> &NetworkName { + self.read_client.network() + } + + fn client_adapter(&self) -> &IotaClientAdapter { + &self.read_client + } +} + +#[cfg_attr(feature = "send-sync", async_trait)] +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +impl CoreClient for AuditTrailClient +where + S: Signer + OptionalSync, +{ + fn signer(&self) -> &S { + &self.signer + } + + fn sender_address(&self) -> IotaAddress { + IotaAddress::from(self.public_key()) + } + + fn sender_public_key(&self) -> &PublicKey { + self.public_key() + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl AuditTrailReadOnly for AuditTrailClient +where + S: Signer + OptionalSync, +{ + /// Delegates read-only execution to the wrapped [`AuditTrailClientReadOnly`]. + async fn execute_read_only_transaction( + &self, + tx: ProgrammableTransaction, + ) -> Result { + self.read_client.execute_read_only_transaction(tx).await + } +} + +impl AuditTrailFull for AuditTrailClient where S: Signer + OptionalSync {} diff --git a/audit-trail-rs/src/client/mod.rs b/audit-trail-rs/src/client/mod.rs new file mode 100644 index 00000000..feef288f --- /dev/null +++ b/audit-trail-rs/src/client/mod.rs @@ -0,0 +1,32 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Client implementations for interacting with audit trails on the IOTA ledger. +//! +//! [`AuditTrailClientReadOnly`] is the entry point for read-only inspection and typed trail handles. +//! 
[`AuditTrailClient`] wraps a read-only client together with a signer so it can build write +//! transactions through the shared transaction infrastructure. + +use iota_interaction::IotaClientTrait; +use product_common::network_name::NetworkName; + +use crate::error::Error; +use crate::iota_interaction_adapter::IotaClientAdapter; + +/// A signing client that can create audit-trail transaction builders. +pub mod full_client; +/// A read-only client that resolves package IDs and executes inspected calls. +pub mod read_only; + +pub use full_client::*; +pub use read_only::*; + +/// Resolves the network name reported by the given IOTA client. +async fn network_id(iota_client: &IotaClientAdapter) -> Result { + let network_id = iota_client + .read_api() + .get_chain_identifier() + .await + .map_err(|e| Error::RpcError(e.to_string()))?; + Ok(network_id.try_into().expect("chain ID is a valid network name")) +} diff --git a/audit-trail-rs/src/client/read_only.rs b/audit-trail-rs/src/client/read_only.rs new file mode 100644 index 00000000..3b765a4d --- /dev/null +++ b/audit-trail-rs/src/client/read_only.rs @@ -0,0 +1,217 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Read-only client support for audit-trail interactions. +//! +//! [`AuditTrailClientReadOnly`] resolves the deployed package IDs for the connected network, exposes +//! typed trail handles, and provides the internal read-only execution primitive used by the handle +//! APIs. 
+ +use std::ops::Deref; + +#[cfg(not(target_arch = "wasm32"))] +use iota_interaction::IotaClient; +use iota_interaction::IotaClientTrait; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::{ProgrammableTransaction, TransactionKind}; +#[cfg(target_arch = "wasm32")] +use iota_interaction_ts::bindings::WasmIotaClient; +use product_common::core_client::CoreClientReadOnly; +use product_common::network_name::NetworkName; +use serde::de::DeserializeOwned; + +use super::network_id; +use crate::core::trail::{AuditTrailHandle, AuditTrailReadOnly}; +use crate::error::Error; +use crate::iota_interaction_adapter::IotaClientAdapter; +use crate::package; + +/// Explicit package-ID overrides used when constructing an audit-trail client. +/// +/// Use this when talking to custom deployments, local test networks, or any environment where the +/// package registry does not yet know the relevant package IDs. +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub struct PackageOverrides { + /// Override for the audit-trail package itself. + pub audit_trail: Option, + /// Override for the `tf_components` package used by time locks and capabilities. + pub tf_component: Option, +} + +/// A read-only client for interacting with audit-trail objects on a specific network. +/// +/// This is the main entry point for applications that only need package resolution and typed read +/// helpers. Once constructed, use [`Self::trail`] to create lightweight handles scoped to a single +/// trail object. +/// +/// For write flows, wrap this client in [`crate::AuditTrailClient`]. +#[derive(Clone)] +pub struct AuditTrailClientReadOnly { + /// The underlying IOTA client adapter used for communication. + iota_client: IotaClientAdapter, + /// The [`ObjectID`] of the deployed audit trail package (smart contract). + audit_trail_pkg_id: ObjectID, + /// The [`ObjectID`] of the deployed TfComponents package used by audit trails. 
+ pub(crate) tf_components_pkg_id: ObjectID, + /// The name of the network this client is connected to (e.g., "mainnet", "testnet"). + network: NetworkName, + /// Raw chain identifier returned by the IOTA node. + chain_id: String, +} + +impl Deref for AuditTrailClientReadOnly { + type Target = IotaClientAdapter; + fn deref(&self) -> &Self::Target { + &self.iota_client + } +} + +impl AuditTrailClientReadOnly { + /// Returns the name of the network the client is connected to. + pub const fn network(&self) -> &NetworkName { + &self.network + } + + /// Returns the raw chain identifier for the network this client is connected to. + pub fn chain_id(&self) -> &str { + &self.chain_id + } + + /// Returns the package ID used by this client. + /// + /// This is the deployed audit-trail Move package ID, not a trail object ID. + pub fn package_id(&self) -> ObjectID { + self.audit_trail_pkg_id + } + + /// Returns the TfComponents package ID used by this client. + pub fn tf_components_package_id(&self) -> ObjectID { + self.tf_components_pkg_id + } + + /// Returns a reference to the underlying IOTA client adapter. + pub const fn iota_client(&self) -> &IotaClientAdapter { + &self.iota_client + } + + /// Returns a typed handle bound to a specific trail object ID. + /// + /// Creating the handle is cheap. Reads only happen when you call methods on the returned + /// [`AuditTrailHandle`], such as [`AuditTrailHandle::get`]. + pub fn trail<'a>(&'a self, trail_id: ObjectID) -> AuditTrailHandle<'a, Self> { + AuditTrailHandle::new(self, trail_id) + } + + /// Creates a new read-only client from an IOTA client. + /// + /// The package IDs are resolved from the internal registry using the connected network name. + /// This is the recommended constructor when connecting to official deployments whose package + /// history is already tracked by the crate. 
+ /// + /// # Errors + /// + /// Returns an error if the network cannot be resolved or if the package IDs for that network + /// cannot be determined. + pub async fn new( + #[cfg(target_arch = "wasm32")] iota_client: WasmIotaClient, + #[cfg(not(target_arch = "wasm32"))] iota_client: IotaClient, + ) -> Result { + let client = IotaClientAdapter::new(iota_client); + let network = network_id(&client).await?; + Self::new_internal(client, network, PackageOverrides::default()).await + } + + async fn new_internal( + iota_client: IotaClientAdapter, + network: NetworkName, + package_overrides: PackageOverrides, + ) -> Result { + let chain_id = network.as_ref().to_string(); + let (network, package_ids) = package::resolve_package_ids(&network, &package_overrides).await?; + + Ok(Self { + iota_client, + audit_trail_pkg_id: package_ids.audit_trail_package_id, + tf_components_pkg_id: package_ids.tf_components_package_id, + network, + chain_id, + }) + } + + /// Creates a new read-only client with explicit package-ID overrides. + /// + /// This bypasses the default package-registry lookup for any IDs provided in + /// [`PackageOverrides`]. + /// + /// Prefer this constructor when talking to custom deployments, local networks, or preview + /// environments whose package IDs are not yet part of the built-in registry. + /// + /// # Errors + /// + /// Returns an error if the network cannot be resolved or if the resulting package-ID + /// configuration is invalid. 
+ pub async fn new_with_package_overrides( + #[cfg(target_arch = "wasm32")] iota_client: WasmIotaClient, + #[cfg(not(target_arch = "wasm32"))] iota_client: IotaClient, + package_overrides: PackageOverrides, + ) -> Result { + let client = IotaClientAdapter::new(iota_client); + let network = network_id(&client).await?; + Self::new_internal(client, network, package_overrides).await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait::async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait::async_trait)] +impl CoreClientReadOnly for AuditTrailClientReadOnly { + fn package_id(&self) -> ObjectID { + self.audit_trail_pkg_id + } + + fn tf_components_package_id(&self) -> Option { + Some(self.tf_components_pkg_id) + } + + fn network_name(&self) -> &NetworkName { + &self.network + } + + fn client_adapter(&self) -> &IotaClientAdapter { + &self.iota_client + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait::async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait::async_trait)] +impl AuditTrailReadOnly for AuditTrailClientReadOnly { + /// Executes a programmable transaction through `dev_inspect` and decodes the first return + /// value as `T`. + /// + /// This is primarily used by the typed read-only handle APIs. + async fn execute_read_only_transaction( + &self, + tx: ProgrammableTransaction, + ) -> Result { + let inspection_result = self + .iota_client + .read_api() + .dev_inspect_transaction_block(IotaAddress::ZERO, TransactionKind::programmable(tx), None, None, None) + .await + .map_err(|err| Error::UnexpectedApiResponse(format!("Failed to inspect transaction block: {err}")))?; + + let execution_results = inspection_result + .results + .ok_or_else(|| Error::UnexpectedApiResponse("DevInspectResults missing 'results' field".to_string()))?; + + let (return_value_bytes, _) = execution_results + .first() + .ok_or_else(|| Error::UnexpectedApiResponse("Execution results list is empty".to_string()))? 
+ .return_values + .first() + .ok_or_else(|| Error::InvalidArgument("should have at least one return value".to_string()))?; + + let deserialized_output = bcs::from_bytes::(return_value_bytes)?; + + Ok(deserialized_output) + } +} diff --git a/audit-trail-rs/src/core/access/mod.rs b/audit-trail-rs/src/core/access/mod.rs new file mode 100644 index 00000000..7f33fe55 --- /dev/null +++ b/audit-trail-rs/src/core/access/mod.rs @@ -0,0 +1,283 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Role and capability management APIs for audit trails. +//! +//! This module is the Rust-facing wrapper around the access-control state integrated into each audit trail. +//! Roles grant [`PermissionSet`] values, while capability objects bind one role to one trail and may add +//! optional address or time restrictions. +//! +//! Additional record-tag constraints are represented as [`RoleTags`]. They narrow which tagged records a role +//! may operate on, but they do not replace the underlying permission checks enforced by the Move package. + +use iota_interaction::types::base_types::ObjectID; +use iota_interaction::{IotaKeySignature, OptionalSync}; +use product_common::core_client::CoreClient; +use product_common::transaction::transaction_builder::TransactionBuilder; +use secret_storage::Signer; + +use crate::core::trail::AuditTrailFull; +use crate::core::types::{CapabilityIssueOptions, PermissionSet, RoleTags}; + +mod operations; +mod transactions; + +pub use transactions::{ + CleanupRevokedCapabilities, CreateRole, DeleteRole, DestroyCapability, DestroyInitialAdminCapability, + IssueCapability, RevokeCapability, RevokeInitialAdminCapability, UpdateRole, +}; + +/// Access-control API scoped to a specific trail. +/// +/// This handle exposes role-management and capability-management operations for one trail. All authorization is +/// still enforced against the capability supplied during transaction construction. 
+#[derive(Debug, Clone)] +pub struct TrailAccess<'a, C> { + pub(crate) client: &'a C, + pub(crate) trail_id: ObjectID, + pub(crate) selected_capability_id: Option, +} + +impl<'a, C> TrailAccess<'a, C> { + pub(crate) fn new(client: &'a C, trail_id: ObjectID, selected_capability_id: Option) -> Self { + Self { + client, + trail_id, + selected_capability_id, + } + } + + /// Uses the provided capability as the auth capability for subsequent write operations. + pub fn using_capability(mut self, capability_id: ObjectID) -> Self { + self.selected_capability_id = Some(capability_id); + self + } + + /// Returns a role-scoped handle for the given role name. + /// + /// The returned handle only identifies the role. Existence and authorization are checked when the + /// resulting transaction is built and executed. + pub fn for_role(&self, name: impl Into) -> RoleHandle<'a, C> { + RoleHandle::new(self.client, self.trail_id, name.into(), self.selected_capability_id) + } + + /// Revokes an issued capability. + /// + /// Revocation adds the capability ID to the trail's denylist. Pass the capability's `valid_until` value + /// when it is known so later cleanup keeps the same expiry semantics. + pub fn revoke_capability( + &self, + capability_id: ObjectID, + capability_valid_until: Option, + ) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(RevokeCapability::new( + self.trail_id, + owner, + capability_id, + capability_valid_until, + self.selected_capability_id, + )) + } + + /// Destroys a capability object. + /// + /// This consumes the owned capability object itself. It uses the generic capability-destruction path and + /// therefore must not be used for initial-admin capabilities. 
+ pub fn destroy_capability(&self, capability_id: ObjectID) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(DestroyCapability::new( + self.trail_id, + owner, + capability_id, + self.selected_capability_id, + )) + } + + /// Destroys an initial-admin capability without presenting another authorization capability. + /// + /// Initial-admin capability IDs are tracked separately, so they cannot be removed through the generic + /// destroy path. + pub fn destroy_initial_admin_capability( + &self, + capability_id: ObjectID, + ) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + TransactionBuilder::new(DestroyInitialAdminCapability::new(self.trail_id, capability_id)) + } + + /// Revokes an initial-admin capability by ID. + /// + /// Like [`TrailAccess::revoke_capability`], this writes to the denylist. The dedicated entry point exists + /// because initial-admin capability IDs are protected separately. + pub fn revoke_initial_admin_capability( + &self, + capability_id: ObjectID, + capability_valid_until: Option, + ) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(RevokeInitialAdminCapability::new( + self.trail_id, + owner, + capability_id, + capability_valid_until, + self.selected_capability_id, + )) + } + + /// Removes expired entries from the revoked-capability denylist. + /// + /// Only entries whose stored expiry has passed are removed. Revocations without an expiry remain until + /// they are explicitly destroyed or the trail is deleted. 
+ pub fn cleanup_revoked_capabilities(&self) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(CleanupRevokedCapabilities::new( + self.trail_id, + owner, + self.selected_capability_id, + )) + } +} + +/// Role-scoped access-control API. +/// +/// A `RoleHandle` identifies one role name inside the trail's access-control state and builds transactions that +/// act on that role. +#[derive(Debug, Clone)] +pub struct RoleHandle<'a, C> { + pub(crate) client: &'a C, + pub(crate) trail_id: ObjectID, + pub(crate) name: String, + pub(crate) selected_capability_id: Option, +} + +impl<'a, C> RoleHandle<'a, C> { + pub(crate) fn new( + client: &'a C, + trail_id: ObjectID, + name: String, + selected_capability_id: Option, + ) -> Self { + Self { + client, + trail_id, + name, + selected_capability_id, + } + } + + /// Uses the provided capability as the auth capability for subsequent write operations. + pub fn using_capability(mut self, capability_id: ObjectID) -> Self { + self.selected_capability_id = Some(capability_id); + self + } + + /// Returns the role name represented by this handle. + pub fn name(&self) -> &str { + &self.name + } + + /// Creates this role with the provided permissions and optional role-tag + /// access rules. + /// + /// Any supplied [`RoleTags`] must already exist in the trail-owned tag + /// registry. The tag list is stored as + /// role data on the Move side and is later used for tag-aware record authorization. + pub fn create(&self, permissions: PermissionSet, role_tags: Option) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(CreateRole::new( + self.trail_id, + owner, + self.name.clone(), + permissions, + role_tags, + self.selected_capability_id, + )) + } + + /// Issues a capability for this role using optional restrictions. 
+ /// + /// The resulting capability always targets this trail and grants exactly + /// this role. `issued_to`, + /// `valid_from_ms`, and `valid_until_ms` only configure restrictions on + /// the issued object; enforcement + /// happens on-chain when the capability is later used. + pub fn issue_capability(&self, options: CapabilityIssueOptions) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(IssueCapability::new( + self.trail_id, + owner, + self.name.clone(), + options, + self.selected_capability_id, + )) + } + + /// Updates permissions and role-tag access rules for this role. + /// + /// As with [`RoleHandle::create`], any supplied [`RoleTags`] must already + /// exist in the trail tag registry. + pub fn update_permissions( + &self, + permissions: PermissionSet, + role_tags: Option, + ) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(UpdateRole::new( + self.trail_id, + owner, + self.name.clone(), + permissions, + role_tags, + self.selected_capability_id, + )) + } + + /// Deletes this role. + /// + /// The reserved initial-admin role cannot be deleted. + pub fn delete(&self) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(DeleteRole::new( + self.trail_id, + owner, + self.name.clone(), + self.selected_capability_id, + )) + } +} diff --git a/audit-trail-rs/src/core/access/operations.rs b/audit-trail-rs/src/core/access/operations.rs new file mode 100644 index 00000000..dba653e2 --- /dev/null +++ b/audit-trail-rs/src/core/access/operations.rs @@ -0,0 +1,381 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Internal access-control helpers that build role and capability transactions. 
+//! +//! These helpers encode Rust-side access inputs into the exact Move call shapes expected by the audit-trail +//! package and apply the lightweight preflight checks that are cheaper to surface before submission. + +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::{ObjectArg, ProgrammableTransaction}; +use iota_interaction::{OptionalSync, ident_str}; +use product_common::core_client::CoreClientReadOnly; + +use crate::core::internal::{trail as trail_reader, tx}; +use crate::core::types::{CapabilityIssueOptions, Permission, PermissionSet, RoleTags}; +use crate::error::Error; + +/// Internal namespace for role and capability transaction construction. +/// +/// Each helper selects the required authorization permission, prepares +/// Move-compatible arguments, and then +/// delegates to the shared trail transaction builders in [`crate::core::internal::tx`]. +pub(super) struct AccessOps; + +impl AccessOps { + /// Builds the `create_role` call. + /// + /// `role_tags`, when present, are validated against the trail tag registry + /// before PTB construction so the + /// Rust side fails early with `Error::InvalidArgument` instead of relying on a later Move abort. 
+ pub(super) async fn create_role( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + name: String, + permissions: PermissionSet, + role_tags: Option, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + assert_role_tags_defined(client, trail_id, &role_tags).await?; + + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::AddRoles, + selected_capability_id, + "create_role", + |ptb, _| { + let role = tx::ptb_pure(ptb, "role", name)?; + let perms_vec = permissions.to_move_vec(client.package_id(), ptb)?; + let perms = ptb.programmable_move_call( + client.package_id(), + ident_str!("permission").into(), + ident_str!("from_vec").into(), + vec![], + vec![perms_vec], + ); + let role_tags_arg = match role_tags { + Some(role_tags) => { + let role_tags_arg = role_tags.to_ptb(ptb, client.package_id())?; + + tx::option_to_move(Some(role_tags_arg), RoleTags::tag(client.package_id()), ptb) + .map_err(|e| Error::InvalidArgument(format!("failed to build role_tags option: {e}")))? + } + None => tx::option_to_move(None, RoleTags::tag(client.package_id()), ptb) + .map_err(|e| Error::InvalidArgument(format!("failed to build role_tags option: {e}")))?, + }; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![role, perms, role_tags_arg, clock]) + }, + ) + .await + } + + /// Builds the `update_role_permissions` call. + /// + /// The same tag-registry precondition as [`AccessOps::create_role`] applies because role-tag data is stored + /// on-chain as part of the role definition. 
+ pub(super) async fn update_role( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + name: String, + permissions: PermissionSet, + role_tags: Option, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + assert_role_tags_defined(client, trail_id, &role_tags).await?; + + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::UpdateRoles, + selected_capability_id, + "update_role_permissions", + |ptb, _| { + let role = tx::ptb_pure(ptb, "role", name)?; + let perms_vec = permissions.to_move_vec(client.package_id(), ptb)?; + + let perms = ptb.programmable_move_call( + client.package_id(), + ident_str!("permission").into(), + ident_str!("from_vec").into(), + vec![], + vec![perms_vec], + ); + let role_tags_arg = match role_tags { + Some(role_tags) => { + let role_tags_arg = role_tags.to_ptb(ptb, client.package_id())?; + tx::option_to_move(Some(role_tags_arg), RoleTags::tag(client.package_id()), ptb) + .map_err(|e| Error::InvalidArgument(format!("failed to build role_tags option: {e}")))? + } + None => tx::option_to_move(None, RoleTags::tag(client.package_id()), ptb) + .map_err(|e| Error::InvalidArgument(format!("failed to build role_tags option: {e}")))?, + }; + + let clock = tx::get_clock_ref(ptb); + + Ok(vec![role, perms, role_tags_arg, clock]) + }, + ) + .await + } + + /// Builds the `delete_role` call. + /// + /// The PTB only carries the role name and clock reference. Protection of the initial-admin role remains an + /// access-control invariant enforced by the Move package. 
+ pub(super) async fn delete_role( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + name: String, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::DeleteRoles, + selected_capability_id, + "delete_role", + |ptb, _| { + let role = tx::ptb_pure(ptb, "role", name)?; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![role, clock]) + }, + ) + .await + } + + /// Builds the `new_capability` call for a role. + /// + /// Optional restrictions are serialized exactly as provided. Validation of `issued_to`, `valid_from`, and + /// `valid_until` semantics remains on-chain. + pub(super) async fn issue_capability( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + role_name: String, + options: CapabilityIssueOptions, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::AddCapabilities, + selected_capability_id, + "new_capability", + |ptb, _| { + let role = tx::ptb_pure(ptb, "role", role_name)?; + let issued_to = tx::ptb_pure(ptb, "issued_to", options.issued_to)?; + let valid_from = tx::ptb_pure(ptb, "valid_from", options.valid_from_ms)?; + let valid_until = tx::ptb_pure(ptb, "valid_until", options.valid_until_ms)?; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![role, issued_to, valid_from, valid_until, clock]) + }, + ) + .await + } + + /// Builds the generic `revoke_capability` call. + /// + /// `capability_valid_until` is forwarded to the Move layer so the denylist can later be cleaned up without + /// losing the capability's original expiry boundary. 
+ pub(super) async fn revoke_capability( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + capability_id: ObjectID, + capability_valid_until: Option, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::RevokeCapabilities, + selected_capability_id, + "revoke_capability", + |ptb, _| { + let cap = tx::ptb_pure(ptb, "capability_id", capability_id)?; + let valid_until = tx::ptb_pure(ptb, "capability_valid_until", capability_valid_until)?; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![cap, valid_until, clock]) + }, + ) + .await + } + + /// Builds the generic `destroy_capability` call. + /// + /// This resolves the capability object reference up front because the Move entry point consumes the owned + /// capability object rather than only its ID. + pub(super) async fn destroy_capability( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + capability_id: ObjectID, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let capability_ref = tx::get_object_ref_by_id(client, &capability_id).await?; + + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::RevokeCapabilities, + selected_capability_id, + "destroy_capability", + |ptb, _| { + let cap_to_destroy = ptb + .obj(ObjectArg::ImmOrOwnedObject(capability_ref)) + .map_err(|e| Error::InvalidArgument(format!("Failed to create capability argument: {e}")))?; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![cap_to_destroy, clock]) + }, + ) + .await + } + + /// Builds the dedicated `destroy_initial_admin_capability` call. + /// + /// Initial-admin capability IDs are tracked separately, so they cannot be destroyed through the generic + /// capability path. 
+ pub(super) async fn destroy_initial_admin_capability( + client: &C, + trail_id: ObjectID, + capability_id: ObjectID, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let cap_ref = tx::get_object_ref_by_id(client, &capability_id).await?; + tx::build_trail_transaction_with_cap_ref( + client, + trail_id, + cap_ref, + "destroy_initial_admin_capability", + |_, _| Ok(vec![]), + ) + .await + } + + /// Builds the dedicated `revoke_initial_admin_capability` call. + /// + /// This keeps the same denylist-expiry behavior as [`AccessOps::revoke_capability`] while using the + /// separate Move entry point reserved for tracked initial-admin IDs. + pub(super) async fn revoke_initial_admin_capability( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + capability_id: ObjectID, + capability_valid_until: Option, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::RevokeCapabilities, + selected_capability_id, + "revoke_initial_admin_capability", + |ptb, _| { + let cap = tx::ptb_pure(ptb, "capability_id", capability_id)?; + let valid_until = tx::ptb_pure(ptb, "capability_valid_until", capability_valid_until)?; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![cap, valid_until, clock]) + }, + ) + .await + } + + /// Builds the `cleanup_revoked_capabilities` call. + /// + /// Cleanup only prunes denylist entries whose stored expiry has elapsed. It does not change capability + /// objects and does not revoke any additional IDs. 
+ pub(super) async fn cleanup_revoked_capabilities( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::RevokeCapabilities, + selected_capability_id, + "cleanup_revoked_capabilities", + |ptb, _| { + let clock = tx::get_clock_ref(ptb); + Ok(vec![clock]) + }, + ) + .await + } +} + +/// Verifies that every requested role tag already exists in the trail tag registry. +/// +/// Roles may only reference tags that are defined on the trail itself so later record-tag checks +/// stay consistent with the registry stored on-chain. +async fn assert_role_tags_defined(client: &C, trail_id: ObjectID, role_tags: &Option) -> Result<(), Error> +where + C: CoreClientReadOnly + OptionalSync, +{ + let Some(role_tags) = role_tags else { + return Ok(()); + }; + + let trail = trail_reader::get_audit_trail(trail_id, client).await?; + let undefined_tags = role_tags + .tags + .iter() + .filter(|tag| !trail.tags.contains_key(tag)) + .cloned() + .collect::>(); + + if undefined_tags.is_empty() { + Ok(()) + } else { + Err(Error::InvalidArgument(format!( + "role tags {:?} are not defined for trail {trail_id}", + undefined_tags + ))) + } +} diff --git a/audit-trail-rs/src/core/access/transactions.rs b/audit-trail-rs/src/core/access/transactions.rs new file mode 100644 index 00000000..7380dd4f --- /dev/null +++ b/audit-trail-rs/src/core/access/transactions.rs @@ -0,0 +1,759 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Transaction payloads for audit-trail role and capability administration. +//! +//! These types cache the generated programmable transaction, delegate PTB construction to +//! [`super::operations::AccessOps`], and decode the matching Move events into typed Rust outputs. 
+ +use async_trait::async_trait; +use iota_interaction::OptionalSync; +use iota_interaction::rpc_types::{IotaTransactionBlockEffects, IotaTransactionBlockEvents}; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::ProgrammableTransaction; +use product_common::core_client::CoreClientReadOnly; +use product_common::transaction::transaction_builder::Transaction; +use tokio::sync::OnceCell; + +use super::operations::AccessOps; +use crate::core::types::{ + CapabilityDestroyed, CapabilityIssueOptions, CapabilityIssued, CapabilityRevoked, Event, PermissionSet, + RawRoleCreated, RawRoleDeleted, RawRoleUpdated, RoleCreated, RoleDeleted, RoleTags, RoleUpdated, +}; +use crate::error::Error; + +// ===== CreateRole ===== + +/// Transaction that creates a role on a trail. +/// +/// This maps to the audit-trail `create_role` Move entry point and therefore requires an authorization +/// capability with `AddRoles`. +#[derive(Debug, Clone)] +pub struct CreateRole { + trail_id: ObjectID, + owner: IotaAddress, + name: String, + permissions: PermissionSet, + role_tags: Option, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl CreateRole { + /// Creates a `CreateRole` transaction builder payload. + /// + /// `role_tags`, when present, are serialized as Move `record_tags::RoleTags` role data. 
+ pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + name: String, + permissions: PermissionSet, + role_tags: Option, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + name, + permissions, + role_tags, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + AccessOps::create_role( + client, + self.trail_id, + self.owner, + self.name.clone(), + self.permissions.clone(), + self.role_tags.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for CreateRole { + type Error = Error; + type Output = RoleCreated; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| bcs::from_bytes::(data.bcs.bytes()).ok().map(Into::into)) + .ok_or_else(|| Error::UnexpectedApiResponse("RoleCreated event not found".to_string()))?; + + Ok(event) + } + + async fn apply(mut self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!("RoleCreated output requires transaction events") + } +} + +/// Transaction that updates an existing role. +/// +/// This updates both the permission set and the optional role-tag data stored for the role. The entry point +/// requires `UpdateRoles`. 
+#[derive(Debug, Clone)] +pub struct UpdateRole { + trail_id: ObjectID, + owner: IotaAddress, + name: String, + permissions: PermissionSet, + role_tags: Option, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl UpdateRole { + /// Creates an `UpdateRole` transaction builder payload. + pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + name: String, + permissions: PermissionSet, + role_tags: Option, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + name, + permissions, + role_tags, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + AccessOps::update_role( + client, + self.trail_id, + self.owner, + self.name.clone(), + self.permissions.clone(), + self.role_tags.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for UpdateRole { + type Error = Error; + type Output = RoleUpdated; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| bcs::from_bytes::(data.bcs.bytes()).ok().map(Into::into)) + .ok_or_else(|| Error::UnexpectedApiResponse("RoleUpdated event not found".to_string()))?; + + Ok(event) + } + + async fn apply(mut self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} + +/// Transaction that deletes a role. 
+/// +/// The reserved initial-admin role cannot be deleted even if the caller holds `DeleteRoles`. +#[derive(Debug, Clone)] +pub struct DeleteRole { + trail_id: ObjectID, + owner: IotaAddress, + name: String, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl DeleteRole { + /// Creates a `DeleteRole` transaction builder payload. + pub fn new(trail_id: ObjectID, owner: IotaAddress, name: String, selected_capability_id: Option) -> Self { + Self { + trail_id, + owner, + name, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + AccessOps::delete_role( + client, + self.trail_id, + self.owner, + self.name.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for DeleteRole { + type Error = Error; + type Output = RoleDeleted; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| bcs::from_bytes::(data.bcs.bytes()).ok().map(Into::into)) + .ok_or_else(|| Error::UnexpectedApiResponse("RoleDeleted event not found".to_string()))?; + + Ok(event) + } + + async fn apply(mut self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} + +/// Transaction that issues a capability for a role. +/// +/// This mints a new capability object for `role` against `trail_id`. 
Optional issuance restrictions are +/// copied into the capability object and later enforced on-chain. +#[derive(Debug, Clone)] +pub struct IssueCapability { + trail_id: ObjectID, + owner: IotaAddress, + role: String, + options: CapabilityIssueOptions, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl IssueCapability { + /// Creates an `IssueCapability` transaction builder payload. + pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + role: String, + options: CapabilityIssueOptions, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + role, + options, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + AccessOps::issue_capability( + client, + self.trail_id, + self.owner, + self.role.clone(), + self.options.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for IssueCapability { + type Error = Error; + type Output = CapabilityIssued; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| serde_json::from_value::>(data.parsed_json.clone()).ok()) + .ok_or_else(|| Error::UnexpectedApiResponse("CapabilityIssued event not found".to_string()))?; + + Ok(event.data) + } + + async fn apply(mut self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} + +/// Transaction that revokes a capability. 
+/// +/// Revocation writes the capability ID into the trail's revoked-capability denylist. Supplying +/// `capability_valid_until` preserves the same expiry boundary later used by denylist cleanup. +#[derive(Debug, Clone)] +pub struct RevokeCapability { + trail_id: ObjectID, + owner: IotaAddress, + capability_id: ObjectID, + capability_valid_until: Option, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl RevokeCapability { + /// Creates a `RevokeCapability` transaction builder payload. + pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + capability_id: ObjectID, + capability_valid_until: Option, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + capability_id, + capability_valid_until, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + AccessOps::revoke_capability( + client, + self.trail_id, + self.owner, + self.capability_id, + self.capability_valid_until, + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for RevokeCapability { + type Error = Error; + type Output = CapabilityRevoked; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| serde_json::from_value::>(data.parsed_json.clone()).ok()) + .ok_or_else(|| Error::UnexpectedApiResponse("CapabilityRevoked event not found".to_string()))?; + + Ok(event.data) + } + + async fn apply(mut self, _: &mut 
IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} + +/// Transaction that destroys a capability object. +/// +/// This path is for ordinary capabilities. Initial-admin capabilities must use +/// [`DestroyInitialAdminCapability`] instead. +#[derive(Debug, Clone)] +pub struct DestroyCapability { + trail_id: ObjectID, + owner: IotaAddress, + capability_id: ObjectID, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl DestroyCapability { + /// Creates a `DestroyCapability` transaction builder payload. + pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + capability_id: ObjectID, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + capability_id, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + AccessOps::destroy_capability( + client, + self.trail_id, + self.owner, + self.capability_id, + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for DestroyCapability { + type Error = Error; + type Output = CapabilityDestroyed; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| serde_json::from_value::>(data.parsed_json.clone()).ok()) + .ok_or_else(|| Error::UnexpectedApiResponse("CapabilityDestroyed event not found".to_string()))?; + + Ok(event.data) + } + + async fn apply(mut self, _: &mut 
IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} + +// ===== DestroyInitialAdminCapability ===== + +/// Transaction that destroys an initial-admin capability without an auth capability. +/// +/// Initial-admin capability IDs are tracked separately and cannot be removed through the generic destroy path. +#[derive(Debug, Clone)] +pub struct DestroyInitialAdminCapability { + trail_id: ObjectID, + capability_id: ObjectID, + cached_ptb: OnceCell, +} + +impl DestroyInitialAdminCapability { + /// Creates a `DestroyInitialAdminCapability` transaction builder payload. + pub fn new(trail_id: ObjectID, capability_id: ObjectID) -> Self { + Self { + trail_id, + capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + AccessOps::destroy_initial_admin_capability(client, self.trail_id, self.capability_id).await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for DestroyInitialAdminCapability { + type Error = Error; + type Output = CapabilityDestroyed; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| serde_json::from_value::>(data.parsed_json.clone()).ok()) + .ok_or_else(|| Error::UnexpectedApiResponse("CapabilityDestroyed event not found".to_string()))?; + + Ok(event.data) + } + + async fn apply(mut self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + 
unreachable!() + } +} + +// ===== RevokeInitialAdminCapability ===== + +/// Transaction that revokes an initial-admin capability. +/// +/// This is the dedicated revoke path for capability IDs recognized as active initial-admin capabilities. +#[derive(Debug, Clone)] +pub struct RevokeInitialAdminCapability { + trail_id: ObjectID, + owner: IotaAddress, + capability_id: ObjectID, + capability_valid_until: Option, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl RevokeInitialAdminCapability { + /// Creates a `RevokeInitialAdminCapability` transaction builder payload. + pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + capability_id: ObjectID, + capability_valid_until: Option, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + capability_id, + capability_valid_until, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + AccessOps::revoke_initial_admin_capability( + client, + self.trail_id, + self.owner, + self.capability_id, + self.capability_valid_until, + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for RevokeInitialAdminCapability { + type Error = Error; + type Output = CapabilityRevoked; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| serde_json::from_value::>(data.parsed_json.clone()).ok()) + .ok_or_else(|| Error::UnexpectedApiResponse("CapabilityRevoked 
event not found".to_string()))?; + + Ok(event.data) + } + + async fn apply(mut self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} + +/// Transaction that cleans up expired revoked-capability entries. +/// +/// This does not revoke additional capabilities. It only prunes denylist entries whose stored expiry has +/// already elapsed. +#[derive(Debug, Clone)] +pub struct CleanupRevokedCapabilities { + trail_id: ObjectID, + owner: IotaAddress, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl CleanupRevokedCapabilities { + /// Creates a `CleanupRevokedCapabilities` transaction builder payload. + pub fn new(trail_id: ObjectID, owner: IotaAddress, selected_capability_id: Option) -> Self { + Self { + trail_id, + owner, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + AccessOps::cleanup_revoked_capabilities(client, self.trail_id, self.owner, self.selected_capability_id).await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for CleanupRevokedCapabilities { + type Error = Error; + type Output = (); + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + Ok(()) + } +} diff --git a/audit-trail-rs/src/core/builder.rs b/audit-trail-rs/src/core/builder.rs new file mode 100644 index 00000000..3194b0fe --- /dev/null +++ b/audit-trail-rs/src/core/builder.rs @@ -0,0 +1,110 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Builder for trail-creation transactions. 
+ +use std::collections::HashSet; + +use iota_interaction::types::base_types::IotaAddress; +use product_common::transaction::transaction_builder::TransactionBuilder; + +use super::types::{Data, ImmutableMetadata, InitialRecord, LockingConfig}; +use crate::core::create::CreateTrail; + +/// Builder for creating an audit trail. +/// +/// The builder collects the full create-time configuration before it is normalized into the Move `create` +/// call. Any tag list configured here becomes the trail-owned registry that later role-tag and record-tag +/// checks refer to. +#[derive(Debug, Clone, Default)] +pub struct AuditTrailBuilder { + /// Initial admin address that should receive the initial admin capability. + pub admin: Option, + /// Optional initial record created together with the trail. + pub initial_record: Option, + /// Locking rules to apply at creation time. + pub locking_config: LockingConfig, + /// Immutable metadata stored once at creation time. + pub trail_metadata: Option, + /// Mutable metadata stored on the trail object. + pub updatable_metadata: Option, + /// Canonical list of record tags owned by the trail. + pub record_tags: HashSet, +} + +impl AuditTrailBuilder { + /// Sets the full initial record input used during trail creation. + /// + /// When present, the initial record is created as sequence number `0`. + pub fn with_initial_record(mut self, initial_record: InitialRecord) -> Self { + self.initial_record = Some(initial_record); + self + } + + /// Convenience helper for constructing the initial record inline. + pub fn with_initial_record_parts( + mut self, + data: impl Into, + metadata: Option, + tag: Option, + ) -> Self { + self.initial_record = Some(InitialRecord::new(data, metadata, tag)); + self + } + + /// Sets the locking configuration for the trail. + /// + /// This replaces the entire create-time locking configuration. 
+ pub fn with_locking_config(mut self, config: LockingConfig) -> Self { + self.locking_config = config; + self + } + + /// Sets immutable metadata for the trail. + /// + /// Immutable metadata is stored once during creation and cannot be updated later. + pub fn with_trail_metadata(mut self, metadata: ImmutableMetadata) -> Self { + self.trail_metadata = Some(metadata); + self + } + + /// Sets immutable metadata by parts. + pub fn with_trail_metadata_parts(mut self, name: impl Into, description: Option) -> Self { + self.trail_metadata = Some(ImmutableMetadata { + name: name.into(), + description, + }); + self + } + + /// Sets updatable metadata for the trail. + /// + /// This seeds the mutable metadata field that later `update_metadata` calls can replace or clear. + pub fn with_updatable_metadata(mut self, metadata: impl Into) -> Self { + self.updatable_metadata = Some(metadata.into()); + self + } + + /// Sets the canonical list of tags that may be used on records in this trail. + /// + /// The list is deduplicated into the trail-owned tag registry during creation. + pub fn with_record_tags(mut self, tags: I) -> Self + where + I: IntoIterator, + S: Into, + { + self.record_tags = tags.into_iter().map(Into::into).collect(); + self + } + + /// Sets the admin address that receives the initial-admin capability. + pub fn with_admin(mut self, admin: IotaAddress) -> Self { + self.admin = Some(admin); + self + } + + /// Finalizes the builder and creates the trail-creation transaction builder. + pub fn finish(self) -> TransactionBuilder { + TransactionBuilder::new(CreateTrail::new(self)) + } +} diff --git a/audit-trail-rs/src/core/create/mod.rs b/audit-trail-rs/src/core/create/mod.rs new file mode 100644 index 00000000..7dace9ff --- /dev/null +++ b/audit-trail-rs/src/core/create/mod.rs @@ -0,0 +1,9 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Trail-creation transaction types. 
+ +mod operations; +mod transactions; + +pub use transactions::{CreateTrail, TrailCreated}; diff --git a/audit-trail-rs/src/core/create/operations.rs b/audit-trail-rs/src/core/create/operations.rs new file mode 100644 index 00000000..30132c81 --- /dev/null +++ b/audit-trail-rs/src/core/create/operations.rs @@ -0,0 +1,116 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Internal helpers that turn validated builder state into the trail-creation Move call. + +use std::collections::HashSet; + +use iota_interaction::ident_str; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::programmable_transaction_builder::ProgrammableTransactionBuilder; +use iota_interaction::types::transaction::{Argument, ProgrammableTransaction}; + +use crate::core::internal::tx; +use crate::core::types::{Data, ImmutableMetadata, InitialRecord, LockingConfig}; +use crate::error::Error; + +/// Internal namespace for trail-creation transaction construction. +pub(super) struct CreateOps; + +/// Normalized inputs required to build the `main::create` programmable transaction. +/// +/// This keeps the public builder layer separate from the low-level PTB encoding logic. +pub(super) struct CreateTrailArgs { + /// Audit-trail package used for generic type tags and Move calls. + pub audit_trail_package_id: ObjectID, + /// TfComponents package used by locking and capability-related values. + pub tf_components_package_id: ObjectID, + /// Address that should receive the initial admin capability. + pub admin: IotaAddress, + /// Optional first record inserted into the newly created trail. + pub initial_record: Option, + /// Initial locking rules for the trail. + pub locking_config: LockingConfig, + /// Immutable metadata stored at trail creation time. + pub trail_metadata: Option, + /// Mutable metadata slot initialized together with the trail. 
+ pub updatable_metadata: Option, + /// Canonical set of record tags that may be used on the trail. + pub record_tags: HashSet, +} + +impl CreateOps { + /// Builds the programmable transaction that creates a new audit trail. + /// + /// Record tags are sorted before serialization so the resulting wire format is stable across + /// equivalent `HashSet` inputs. + pub(super) fn create_trail(args: CreateTrailArgs) -> Result { + let mut ptb = ProgrammableTransactionBuilder::new(); + let CreateTrailArgs { + audit_trail_package_id, + tf_components_package_id, + admin, + initial_record, + locking_config, + trail_metadata, + updatable_metadata, + record_tags, + } = args; + + let data_tag = Data::tag(audit_trail_package_id); + let initial_record_tag = InitialRecord::tag(audit_trail_package_id); + let initial_record = match initial_record { + Some(initial_record) => { + let initial_record_arg = initial_record.into_ptb(&mut ptb, audit_trail_package_id)?; + tx::option_to_move(Some(initial_record_arg), initial_record_tag, &mut ptb) + } + None => tx::option_to_move(None, initial_record_tag, &mut ptb), + } + .map_err(|e| Error::InvalidArgument(format!("failed to build initial_record option: {e}")))?; + let locking_config = locking_config.to_ptb(&mut ptb, audit_trail_package_id, tf_components_package_id)?; + + let immutable_metadata_tag = ImmutableMetadata::tag(audit_trail_package_id); + + let trail_metadata = match trail_metadata { + Some(metadata) => { + let metadata_arg = metadata.to_ptb(&mut ptb, audit_trail_package_id)?; + tx::option_to_move(Some(metadata_arg), immutable_metadata_tag, &mut ptb) + .map_err(|e| Error::InvalidArgument(format!("failed to build trail_metadata option: {e}")))? 
+ } + None => tx::option_to_move(None, immutable_metadata_tag, &mut ptb) + .map_err(|e| Error::InvalidArgument(format!("failed to build trail_metadata option: {e}")))?, + }; + + let updatable_metadata = tx::ptb_pure(&mut ptb, "updatable_metadata", updatable_metadata)?; + + let record_tags = { + let mut record_tags = record_tags.into_iter().collect::>(); + record_tags.sort(); + tx::ptb_pure(&mut ptb, "record_tags", record_tags)? + }; + let clock = tx::get_clock_ref(&mut ptb); + + let result = ptb.programmable_move_call( + audit_trail_package_id, + ident_str!("main").into(), + ident_str!("create").into(), + vec![data_tag], + vec![ + initial_record, + locking_config, + trail_metadata, + updatable_metadata, + record_tags, + clock, + ], + ); + + let cap = match result { + Argument::Result(idx) => Argument::NestedResult(idx, 0), + _ => unreachable!("programmable_move_call should always return Argument::Result"), + }; + ptb.transfer_arg(admin, cap); + + Ok(ptb.finish()) + } +} diff --git a/audit-trail-rs/src/core/create/transactions.rs b/audit-trail-rs/src/core/create/transactions.rs new file mode 100644 index 00000000..551e2bd7 --- /dev/null +++ b/audit-trail-rs/src/core/create/transactions.rs @@ -0,0 +1,140 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use async_trait::async_trait; +use iota_interaction::OptionalSync; +use iota_interaction::rpc_types::{IotaTransactionBlockEffects, IotaTransactionBlockEvents}; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::ProgrammableTransaction; +use product_common::core_client::CoreClientReadOnly; +use product_common::transaction::transaction_builder::Transaction; +use tokio::sync::OnceCell; + +use super::operations::{CreateOps, CreateTrailArgs}; +use crate::core::builder::AuditTrailBuilder; +use crate::core::internal::trail as trail_reader; +use crate::core::types::{AuditTrailCreated, Event, OnChainAuditTrail}; +use 
crate::error::Error; + +/// Output of a successful trail-creation transaction. +#[derive(Debug, Clone)] +pub struct TrailCreated { + /// Newly created trail object ID. + pub trail_id: ObjectID, + /// Address that created the trail. + pub creator: IotaAddress, + /// Millisecond timestamp emitted by the creation event. + pub timestamp: u64, +} + +impl TrailCreated { + /// Loads the newly created trail object from the ledger. + /// + /// # Errors + /// + /// Returns an error if the trail cannot be fetched or deserialized. + pub async fn fetch_audit_trail(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + trail_reader::get_audit_trail(self.trail_id, client).await + } +} + +/// A transaction that creates a new audit trail. +/// +/// The builder state is normalized into the exact Move `create` call shape, including tag-registry setup, +/// optional initial-record creation, and initial-admin capability assignment. +#[derive(Debug, Clone)] +pub struct CreateTrail { + builder: AuditTrailBuilder, + cached_ptb: OnceCell, +} + +impl CreateTrail { + /// Creates a new [`CreateTrail`] instance. 
+ pub fn new(builder: AuditTrailBuilder) -> Self { + Self { + builder, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let AuditTrailBuilder { + admin, + initial_record, + locking_config, + trail_metadata, + updatable_metadata, + record_tags, + } = self.builder.clone(); + + let admin = admin.ok_or_else(|| { + Error::InvalidArgument( + "admin address is required; use `client.create_trail()` with signer or call `with_admin(...)`" + .to_string(), + ) + })?; + let tf_package_id = client + .tf_components_package_id() + .expect("TfComponents package ID should be present for audit trail clients"); + + CreateOps::create_trail(CreateTrailArgs { + audit_trail_package_id: client.package_id(), + tf_components_package_id: tf_package_id, + admin, + initial_record, + locking_config, + trail_metadata, + updatable_metadata, + record_tags, + }) + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for CreateTrail { + type Error = Error; + type Output = TrailCreated; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| serde_json::from_value::>(data.parsed_json.clone()).ok()) + .ok_or_else(|| Error::UnexpectedApiResponse("AuditTrailCreated event not found".to_string()))?; + + Ok(TrailCreated { + trail_id: event.data.trail_id, + creator: event.data.creator, + timestamp: event.data.timestamp, + }) + } + + async fn apply(mut self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: 
CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} diff --git a/audit-trail-rs/src/core/internal/capability.rs b/audit-trail-rs/src/core/internal/capability.rs new file mode 100644 index 00000000..eeb1ca33 --- /dev/null +++ b/audit-trail-rs/src/core/internal/capability.rs @@ -0,0 +1,379 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Capability discovery helpers used by internal transaction builders. +use std::collections::HashSet; + +use iota_interaction::move_types::language_storage::StructTag; +use iota_interaction::rpc_types::{ + IotaObjectDataFilter, IotaObjectDataOptions, IotaObjectResponseQuery, IotaParsedData, +}; +use iota_interaction::types::TypeTag; +use iota_interaction::types::base_types::{IotaAddress, ObjectID, ObjectRef}; +use iota_interaction::types::dynamic_field::DynamicFieldName; +use iota_interaction::types::id::ID; +use iota_interaction::{IotaClientTrait, OptionalSync}; +use product_common::core_client::CoreClientReadOnly; + +use super::{linked_table, tx}; +use crate::core::types::{Capability, OnChainAuditTrail, Permission}; +use crate::error::Error; + +/// Finds an owned capability object that grants `permission` for `trail_id` and returns its object +/// reference. +/// +/// The lookup is restricted to roles on `trail` that include the requested permission. +pub(crate) async fn find_capable_cap( + client: &C, + owner: IotaAddress, + trail_id: ObjectID, + trail: &OnChainAuditTrail, + permission: Permission, +) -> Result +where + C: CoreClientReadOnly + OptionalSync, +{ + let valid_roles: HashSet = trail + .roles + .roles + .iter() + .filter(|(_, role)| role.permissions.contains(&permission)) + .map(|(name, _)| name.clone()) + .collect(); + + let cap = find_owned_capability(client, owner, trail, |cap| { + cap.matches_target_and_role(trail_id, &valid_roles) + }) + .await? 
+ .ok_or_else(|| { + Error::InvalidArgument(format!( + "no capability with {:?} permission found for owner {owner} and trail {trail_id}", + permission + )) + })?; + + let object_id = *cap.id.object_id(); + tx::get_object_ref_by_id(client, &object_id).await +} + +/// Searches the owner's capability objects and returns the first one matching `predicate`. +/// +/// Revoked capabilities are filtered out before the predicate is applied to the remaining +/// candidates. +pub(crate) async fn find_owned_capability( + client: &C, + owner: IotaAddress, + trail: &OnChainAuditTrail, + predicate: P, +) -> Result, Error> +where + C: CoreClientReadOnly + OptionalSync, + P: Fn(&Capability) -> bool + Send, +{ + let revoked_capability_ids = revoked_capability_ids(client, trail).await?; + let now_ms = now_ms(); + let tf_components_package_id = client + .tf_components_package_id() + .expect("TfComponents package ID should be present for audit trail clients"); + let capability_struct_tag: StructTag = Capability::type_tag(tf_components_package_id) + .to_string() + .parse() + .expect("capability type tag is a valid struct tag"); + let query = IotaObjectResponseQuery::new( + Some(IotaObjectDataFilter::StructType(capability_struct_tag)), + Some(IotaObjectDataOptions::default().with_content()), + ); + + let mut cursor = None; + loop { + let mut page = client + .client_adapter() + .read_api() + .get_owned_objects(owner, Some(query.clone()), cursor, Some(25)) + .await + .map_err(|e| Error::RpcError(e.to_string()))?; + + let maybe_cap = std::mem::take(&mut page.data) + .into_iter() + .filter_map(|res| res.data) + .filter_map(|data| data.content) + .filter_map(|obj_data| { + let IotaParsedData::MoveObject(move_object) = obj_data else { + unreachable!() + }; + serde_json::from_value(move_object.fields.to_json_value()).ok() + }) + .find(|cap| capability_matches(cap, owner, now_ms, &revoked_capability_ids, &predicate)); + cursor = page.next_cursor; + + if maybe_cap.is_some() { + return 
Ok(maybe_cap); + } + if !page.has_next_page { + break; + } + } + + Ok(None) +} + +/// Traverses the revoked-capabilities linked table and collects every revoked capability ID. +/// +/// The traversal validates that the linked-table shape is acyclic and that the number of visited +/// entries matches the size recorded on-chain. +async fn revoked_capability_ids(client: &C, trail: &OnChainAuditTrail) -> Result, Error> +where + C: CoreClientReadOnly + OptionalSync, +{ + let table = &trail.roles.revoked_capabilities; + let expected = table.size as usize; + let mut cursor = table.head; + let mut keys = HashSet::with_capacity(expected); + + while let Some(key) = cursor { + if !keys.insert(key) { + return Err(Error::UnexpectedApiResponse(format!( + "cycle detected while traversing linked-table {table_id}; repeated key {key}", + table_id = table.id + ))); + } + + let node = linked_table::fetch_node::<_, ObjectID, u64>( + client, + table.id, + DynamicFieldName { + type_: TypeTag::Struct(Box::new(ID::type_())), + value: serde_json::Value::String(IotaAddress::from(key).to_string()), + }, + ) + .await?; + cursor = node.next; + } + + if keys.len() != expected { + return Err(Error::UnexpectedApiResponse(format!( + "linked-table traversal mismatch; expected {expected} entries, got {}", + keys.len() + ))); + } + + Ok(keys) +} + +/// Returns whether a capability is a usable match for the current owner and predicate. +/// +/// A capability only matches when it satisfies the caller-provided predicate, has not been +/// revoked, and is either unbound or explicitly issued to `owner`. +fn capability_matches

( + cap: &Capability, + owner: IotaAddress, + now_ms: u64, + revoked_capability_ids: &HashSet, + predicate: &P, +) -> bool +where + P: Fn(&Capability) -> bool, +{ + predicate(cap) + && !revoked_capability_ids.contains(cap.id.object_id()) + && cap.issued_to.map(|issued_to| issued_to == owner).unwrap_or(true) + && cap.valid_from.is_none_or(|valid_from| now_ms >= valid_from) + && cap.valid_until.is_none_or(|valid_until| now_ms <= valid_until) +} + +/// Finds an owned capability for adding a tagged record. +/// +/// Tagged writes have stricter lookup rules than ordinary permission-based +/// operations: the selected role must grant `AddRecord` and its configured +/// `RoleTags` must allow the requested record tag. +pub(crate) async fn find_capable_cap_for_tag( + client: &C, + owner: IotaAddress, + trail_id: ObjectID, + trail: &OnChainAuditTrail, + tag: &str, +) -> Result +where + C: CoreClientReadOnly + OptionalSync, +{ + let valid_roles = trail + .roles + .roles + .iter() + .filter(|(_, role)| { + role.permissions.contains(&Permission::AddRecord) + && role.data.as_ref().is_some_and(|record_tags| record_tags.allows(tag)) + }) + .map(|(name, _)| name.clone()) + .collect::>(); + + let cap = find_owned_capability(client, owner, trail, |cap| { + cap.target_key == trail_id && valid_roles.contains(&cap.role) + }) + .await? + .ok_or_else(|| { + Error::InvalidArgument(format!( + "no capability with {:?} permission and record tag '{tag}' found for owner {owner} and trail {trail_id}", + Permission::AddRecord + )) + })?; + + let object_id = *cap.id.object_id(); + tx::get_object_ref_by_id(client, &object_id).await +} + +/// Returns the current wall-clock time as milliseconds since the Unix epoch. +/// +/// Uses `std::time::SystemTime` on native targets and `js_sys::Date::now()` on +/// `wasm32`, where `SystemTime` is not available. 
/// Returns the current wall-clock time as milliseconds since the Unix epoch.
///
/// Uses `std::time::SystemTime` on native targets and `js_sys::Date::now()` on
/// `wasm32`, where `SystemTime` is not available. A clock set before the epoch
/// yields 0 rather than panicking.
pub(crate) fn now_ms() -> u64 {
    #[cfg(not(target_arch = "wasm32"))]
    {
        use std::time::{SystemTime, UNIX_EPOCH};
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|elapsed| elapsed.as_millis() as u64)
            .unwrap_or(0)
    }
    #[cfg(target_arch = "wasm32")]
    {
        js_sys::Date::now() as u64
    }
}
= dbg_object_id(6); + let valid_roles = HashSet::from(["Writer".to_string()]); + let cap = make_capability(dbg_object_id(7), trail_id, "Writer", None, Some(2_000), None); + + assert!(!capability_matches(&cap, owner, 1_999, &HashSet::new(), &|candidate| { + candidate.matches_target_and_role(trail_id, &valid_roles) + })); + assert!(capability_matches(&cap, owner, 2_000, &HashSet::new(), &|candidate| { + candidate.matches_target_and_role(trail_id, &valid_roles) + })); + } + + #[test] + fn capability_matches_skips_caps_after_valid_until() { + let owner = IotaAddress::random_for_testing_only(); + let trail_id = dbg_object_id(8); + let valid_roles = HashSet::from(["Writer".to_string()]); + let cap = make_capability(dbg_object_id(9), trail_id, "Writer", None, None, Some(2_000)); + + assert!(capability_matches(&cap, owner, 2_000, &HashSet::new(), &|candidate| { + candidate.matches_target_and_role(trail_id, &valid_roles) + })); + assert!(!capability_matches(&cap, owner, 2_001, &HashSet::new(), &|candidate| { + candidate.matches_target_and_role(trail_id, &valid_roles) + })); + } + + #[test] + fn capability_matches_accepts_unbound_capability_for_matching_role() { + let owner = IotaAddress::random_for_testing_only(); + let trail_id = dbg_object_id(6); + let valid_roles = HashSet::from(["Writer".to_string()]); + let cap = make_capability(dbg_object_id(7), trail_id, "Writer", None, None, None); + + assert!(capability_matches(&cap, owner, 0, &HashSet::new(), &|candidate| { + candidate.matches_target_and_role(trail_id, &valid_roles) + })); + } + + #[test] + fn capability_matches_rejects_non_matching_role() { + let owner = IotaAddress::random_for_testing_only(); + let trail_id = dbg_object_id(8); + let valid_roles = HashSet::from(["Writer".to_string()]); + let cap = make_capability(dbg_object_id(9), trail_id, "Reader", None, None, None); + + assert!(!capability_matches(&cap, owner, 0, &HashSet::new(), &|candidate| { + candidate.matches_target_and_role(trail_id, &valid_roles) + })); 
+ } + + #[test] + fn capability_matches_honors_time_constraints() { + let owner = IotaAddress::random_for_testing_only(); + let trail_id = dbg_object_id(10); + let valid_roles = HashSet::from(["Writer".to_string()]); + let cap = make_capability( + dbg_object_id(11), + trail_id, + "Writer", + Some(owner), + Some(1_700_000_000_000), + Some(1_700_000_005_000), + ); + + assert!(capability_matches( + &cap, + owner, + 1_700_000_000_000, + &HashSet::new(), + &|candidate| { candidate.matches_target_and_role(trail_id, &valid_roles) } + )); + } + + fn make_capability( + id: ObjectID, + trail_id: ObjectID, + role: &str, + issued_to: Option, + valid_from: Option, + valid_until: Option, + ) -> Capability { + Capability { + id: UID::new(id), + target_key: trail_id, + role: role.to_string(), + issued_to, + valid_from, + valid_until, + } + } +} diff --git a/audit-trail-rs/src/core/internal/linked_table.rs b/audit-trail-rs/src/core/internal/linked_table.rs new file mode 100644 index 00000000..7f3f4c85 --- /dev/null +++ b/audit-trail-rs/src/core/internal/linked_table.rs @@ -0,0 +1,65 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Helpers for reading Move `LinkedTable` nodes through dynamic fields. + +use iota_interaction::rpc_types::{IotaData as _, IotaObjectDataOptions}; +use iota_interaction::types::base_types::ObjectID; +use iota_interaction::types::collection_types::LinkedTableNode; +use iota_interaction::types::dynamic_field::{DynamicFieldName, Field}; +use iota_interaction::{IotaClientTrait, OptionalSync}; +use product_common::core_client::CoreClientReadOnly; +use serde::de::DeserializeOwned; + +use crate::error::Error; + +/// Fetches and decodes a single linked-table node stored as a dynamic field under `table_id`. +/// +/// The caller provides the fully encoded Move field name so this helper can stay generic over the +/// linked-table key and value types. 
+pub(crate) async fn fetch_node( + client: &C, + table_id: ObjectID, + name: DynamicFieldName, +) -> Result, Error> +where + C: CoreClientReadOnly + OptionalSync, + K: DeserializeOwned, + V: DeserializeOwned, +{ + let name_display = name.to_string(); + let data = client + .client_adapter() + .read_api() + .get_dynamic_field_object_v2(table_id, name, Some(IotaObjectDataOptions::bcs_lossless())) + .await + .map_err(|err| Error::RpcError(err.to_string()))? + .data + .ok_or_else(|| { + Error::UnexpectedApiResponse(format!( + "dynamic-field object not found for linked-table id {table_id} and name {name_display}" + )) + })?; + + let field: Field> = data + .bcs + .ok_or_else(|| { + Error::UnexpectedApiResponse(format!( + "linked-table node {} missing bcs object content", + data.object_id + )) + })? + .try_into_move() + .ok_or_else(|| { + Error::UnexpectedApiResponse(format!( + "linked-table node {} bcs content is not a move object", + data.object_id + )) + })? + .deserialize() + .map_err(|err| { + Error::UnexpectedApiResponse(format!("failed to decode linked-table node {}; {err}", data.object_id)) + })?; + + Ok(field.value) +} diff --git a/audit-trail-rs/src/core/internal/mod.rs b/audit-trail-rs/src/core/internal/mod.rs new file mode 100644 index 00000000..c4409bcb --- /dev/null +++ b/audit-trail-rs/src/core/internal/mod.rs @@ -0,0 +1,16 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Internal helpers used to bridge public audit-trail APIs to low-level IOTA object access and +//! programmable transaction construction. + +/// Capability lookup helpers for trail-scoped permission checks. +pub(crate) mod capability; +/// Linked-table decoding helpers for traversing on-chain Move collections. +pub(crate) mod linked_table; +/// Serde adapters for Move collection types that are exposed as standard Rust collections. +pub(crate) mod move_collections; +/// Raw trail fetch and decode helpers. 
+pub(crate) mod trail; +/// Common programmable-transaction building helpers. +pub(crate) mod tx; diff --git a/audit-trail-rs/src/core/internal/move_collections.rs b/audit-trail-rs/src/core/internal/move_collections.rs new file mode 100644 index 00000000..ba31be21 --- /dev/null +++ b/audit-trail-rs/src/core/internal/move_collections.rs @@ -0,0 +1,39 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Serde adapters for decoding Move collection wrappers into standard Rust collections. + +use std::collections::{HashMap, HashSet}; +use std::fmt::Debug; +use std::hash::Hash; + +use iota_interaction::types::collection_types::{VecMap, VecSet}; +use serde::{Deserialize, Deserializer}; + +/// Deserializes a Move `VecMap` into a Rust [`HashMap`]. +/// +/// This adapter is used on public domain types that expose map-like data as idiomatic Rust +/// collections while preserving the on-chain wire format. +pub(crate) fn deserialize_vec_map<'de, D, K, V>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, + K: Deserialize<'de> + Eq + Hash + Debug, + V: Deserialize<'de> + Debug, +{ + let vec_map = VecMap::::deserialize(deserializer)?; + Ok(vec_map + .contents + .into_iter() + .map(|entry| (entry.key, entry.value)) + .collect()) +} + +/// Deserializes a Move `VecSet` into a Rust [`HashSet`]. +pub(crate) fn deserialize_vec_set<'de, D, T>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: Deserialize<'de> + Eq + Hash, +{ + let vec_set = VecSet::::deserialize(deserializer)?; + Ok(vec_set.contents.into_iter().collect()) +} diff --git a/audit-trail-rs/src/core/internal/trail.rs b/audit-trail-rs/src/core/internal/trail.rs new file mode 100644 index 00000000..d90861b8 --- /dev/null +++ b/audit-trail-rs/src/core/internal/trail.rs @@ -0,0 +1,34 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Helpers for fetching and decoding the shared on-chain audit-trail object. 
+ +use iota_interaction::rpc_types::{IotaData as _, IotaObjectDataOptions}; +use iota_interaction::types::base_types::ObjectID; +use iota_interaction::{IotaClientTrait, OptionalSync}; +use product_common::core_client::CoreClientReadOnly; + +use crate::core::types::OnChainAuditTrail; +use crate::error::Error; + +/// Loads the shared audit-trail object and decodes it into [`OnChainAuditTrail`]. +pub(crate) async fn get_audit_trail(trail_id: ObjectID, client: &C) -> Result +where + C: CoreClientReadOnly + OptionalSync, +{ + let data = client + .client_adapter() + .read_api() + .get_object_with_options(trail_id, IotaObjectDataOptions::bcs_lossless()) + .await + .map_err(|e| Error::UnexpectedApiResponse(format!("failed to fetch trail {} object; {e}", trail_id)))? + .data + .ok_or_else(|| Error::UnexpectedApiResponse(format!("trail {} data not found", trail_id)))?; + + data.bcs + .ok_or_else(|| Error::UnexpectedApiResponse(format!("trail {} missing bcs object content", trail_id)))? + .try_into_move() + .ok_or_else(|| Error::UnexpectedApiResponse(format!("trail {} bcs content is not a move object", trail_id)))? + .deserialize() + .map_err(|e| Error::UnexpectedApiResponse(format!("failed to decode trail {} bcs data; {e}", trail_id))) +} diff --git a/audit-trail-rs/src/core/internal/tx.rs b/audit-trail-rs/src/core/internal/tx.rs new file mode 100644 index 00000000..8680c4d0 --- /dev/null +++ b/audit-trail-rs/src/core/internal/tx.rs @@ -0,0 +1,255 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Shared transaction-building helpers used by the internal audit-trail operations. 
+ +use std::str::FromStr; + +use iota_interaction::rpc_types::IotaObjectDataOptions; +use iota_interaction::types::base_types::{IotaAddress, ObjectID, ObjectRef, STD_OPTION_MODULE_NAME}; +use iota_interaction::types::object::Owner; +use iota_interaction::types::programmable_transaction_builder::{ + ProgrammableTransactionBuilder as Ptb, ProgrammableTransactionBuilder, +}; +use iota_interaction::types::transaction::{Argument, ObjectArg, ProgrammableTransaction}; +use iota_interaction::types::{ + IOTA_CLOCK_OBJECT_ID, IOTA_CLOCK_OBJECT_SHARED_VERSION, Identifier, MOVE_STDLIB_PACKAGE_ID, TypeTag, +}; +use iota_interaction::{IotaClientTrait, OptionalSync, ident_str}; +use product_common::core_client::CoreClientReadOnly; +use serde::Serialize; + +use super::{capability, trail as trail_reader}; +use crate::core::types::Permission; +use crate::error::Error; + +/// Returns the canonical immutable clock object argument. +pub(crate) fn get_clock_ref(ptb: &mut Ptb) -> Argument { + ptb.obj(ObjectArg::SharedObject { + id: IOTA_CLOCK_OBJECT_ID, + initial_shared_version: IOTA_CLOCK_OBJECT_SHARED_VERSION, + mutable: false, + }) + .expect("network has a singleton clock instantiated") +} + +/// Serializes a pure programmable-transaction argument and annotates serialization failures with +/// the logical argument name. +pub(crate) fn ptb_pure(ptb: &mut Ptb, name: &str, value: T) -> Result +where + T: Serialize + core::fmt::Debug, +{ + ptb.pure(&value).map_err(|err| { + Error::InvalidArgument(format!( + r"could not serialize pure value {name} with value {value:?}; {err}" + )) + }) +} + +/// Wraps an optional argument into the corresponding Move `std::option::Option` value. 
+pub(crate) fn option_to_move( + option: Option, + tag: TypeTag, + ptb: &mut ProgrammableTransactionBuilder, +) -> Result { + let arg = if let Some(t) = option { + ptb.programmable_move_call( + MOVE_STDLIB_PACKAGE_ID, + STD_OPTION_MODULE_NAME.into(), + ident_str!("some").into(), + vec![tag], + vec![t], + ) + } else { + ptb.programmable_move_call( + MOVE_STDLIB_PACKAGE_ID, + STD_OPTION_MODULE_NAME.into(), + ident_str!("none").into(), + vec![tag], + vec![], + ) + }; + + Ok(arg) +} + +/// Builds a writable trail transaction after resolving both the trail object and a matching +/// capability for `owner`. +pub(crate) async fn build_trail_transaction( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + permission: Permission, + selected_capability_id: Option, + method: impl AsRef, + additional_args: F, +) -> Result +where + F: FnOnce(&mut ProgrammableTransactionBuilder, &TypeTag) -> Result, Error>, + C: CoreClientReadOnly + OptionalSync, +{ + let cap_ref = if let Some(capability_id) = selected_capability_id { + get_object_ref_by_id(client, &capability_id).await? + } else { + let trail = trail_reader::get_audit_trail(trail_id, client).await?; + capability::find_capable_cap(client, owner, trail_id, &trail, permission).await? + }; + build_trail_transaction_with_cap_ref(client, trail_id, cap_ref, method, additional_args).await +} + +/// Builds a writable trail transaction when the caller already has the capability object +/// reference. 
+pub(crate) async fn build_trail_transaction_with_cap_ref( + client: &C, + trail_id: ObjectID, + cap_ref: ObjectRef, + method: impl AsRef, + additional_args: F, +) -> Result +where + F: FnOnce(&mut ProgrammableTransactionBuilder, &TypeTag) -> Result, Error>, + C: CoreClientReadOnly + OptionalSync, +{ + let mut ptb = ProgrammableTransactionBuilder::new(); + + let type_tag = get_type_tag(client, &trail_id).await?; + let tag = vec![type_tag.clone()]; + let trail_arg = get_shared_object_arg(client, &trail_id, true).await?; + + let mut args = vec![ + ptb.obj(trail_arg) + .map_err(|e| Error::InvalidArgument(format!("Failed to create trail argument: {e}")))?, + ptb.obj(ObjectArg::ImmOrOwnedObject(cap_ref)) + .map_err(|e| Error::InvalidArgument(format!("Failed to create cap argument: {e}")))?, + ]; + + args.extend(additional_args(&mut ptb, &type_tag)?); + + let function = Identifier::from_str(method.as_ref()) + .map_err(|e| Error::InvalidArgument(format!("Invalid method name '{}': {e}", method.as_ref())))?; + + ptb.programmable_move_call(client.package_id(), ident_str!("main").into(), function, tag, args); + + Ok(ptb.finish()) +} + +/// Builds a read-only trail transaction that borrows the shared trail object immutably. 
+pub(crate) async fn build_read_only_transaction( + client: &C, + trail_id: ObjectID, + method: impl AsRef, + additional_args: F, +) -> Result +where + F: FnOnce(&mut ProgrammableTransactionBuilder) -> Result, Error>, + C: CoreClientReadOnly + OptionalSync, +{ + let mut ptb = ProgrammableTransactionBuilder::new(); + + let tag = vec![get_type_tag(client, &trail_id).await?]; + let trail_arg = get_shared_object_arg(client, &trail_id, false).await?; + + let mut args = vec![ + ptb.obj(trail_arg) + .map_err(|e| Error::InvalidArgument(format!("Failed to create trail argument: {e}")))?, + ]; + + args.extend(additional_args(&mut ptb)?); + + let function = Identifier::from_str(method.as_ref()) + .map_err(|e| Error::InvalidArgument(format!("Invalid method name '{}': {e}", method.as_ref())))?; + + ptb.programmable_move_call(client.package_id(), ident_str!("main").into(), function, tag, args); + + Ok(ptb.finish()) +} + +/// Extracts the generic record payload type from the on-chain trail object type. +/// +/// Audit-trail Move entry points are generic over the record payload type, so transaction builders +/// need this type tag to invoke the correct specialization. +pub(crate) async fn get_type_tag(client: &C, object_id: &ObjectID) -> Result +where + C: CoreClientReadOnly + OptionalSync, +{ + let object_response = client + .client_adapter() + .read_api() + .get_object_with_options(*object_id, IotaObjectDataOptions::new().with_type()) + .await + .map_err(|err| Error::FailedToParseTag(format!("Failed to get object: {err}")))?; + + let object_data = object_response + .data + .ok_or_else(|| Error::FailedToParseTag(format!("Object {object_id} not found")))?; + + let full_type_str = object_data + .object_type() + .map_err(|e| Error::FailedToParseTag(format!("Failed to get object type: {e}")))? 
+ .to_string(); + + let type_param_str = parse_type(&full_type_str)?; + + TypeTag::from_str(&type_param_str) + .map_err(|e| Error::FailedToParseTag(format!("Failed to parse tag '{type_param_str}': {e}"))) +} + +/// Extracts the innermost generic type parameter from a full Move object type string. +fn parse_type(full_type: &str) -> Result { + if let (Some(start), Some(end)) = (full_type.find('<'), full_type.rfind('>')) { + Ok(full_type[start + 1..end].to_string()) + } else { + Err(Error::FailedToParseTag(format!( + "Could not parse type parameter from {full_type}" + ))) + } +} + +/// Fetches the current object reference for `object_id`. +pub(crate) async fn get_object_ref_by_id( + client: &impl CoreClientReadOnly, + object_id: &ObjectID, +) -> Result { + let res = client + .client_adapter() + .read_api() + .get_object_with_options(*object_id, IotaObjectDataOptions::new().with_content()) + .await + .map_err(|err| Error::GenericError(format!("Failed to get object: {err}")))?; + + let Some(data) = res.data else { + return Err(Error::InvalidArgument("no data found".to_string())); + }; + + Ok(data.object_ref()) +} + +/// Resolves a shared object argument for use in a programmable transaction. +/// +/// This validates that the fetched object is shared and returns the appropriate mutability flag for +/// the planned call. 
+pub(crate) async fn get_shared_object_arg( + client: &impl CoreClientReadOnly, + object_id: &ObjectID, + mutable: bool, +) -> Result { + let res = client + .client_adapter() + .read_api() + .get_object_with_options(*object_id, IotaObjectDataOptions::new().with_owner()) + .await + .map_err(|err| Error::GenericError(format!("Failed to get object: {err}")))?; + + let Some(data) = res.data else { + return Err(Error::InvalidArgument("no data found".to_string())); + }; + + match data.owner { + Some(Owner::Shared { initial_shared_version }) => Ok(ObjectArg::SharedObject { + id: *object_id, + initial_shared_version, + mutable, + }), + _ => Err(Error::InvalidArgument("object is not shared".to_string())), + } +} diff --git a/audit-trail-rs/src/core/locking/mod.rs b/audit-trail-rs/src/core/locking/mod.rs new file mode 100644 index 00000000..3e8cde62 --- /dev/null +++ b/audit-trail-rs/src/core/locking/mod.rs @@ -0,0 +1,124 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Locking configuration APIs for audit trails. + +use iota_interaction::types::base_types::ObjectID; +use iota_interaction::{IotaKeySignature, OptionalSync}; +use product_common::core_client::CoreClient; +use product_common::transaction::transaction_builder::TransactionBuilder; +use secret_storage::Signer; + +use crate::core::trail::{AuditTrailFull, AuditTrailReadOnly}; +use crate::core::types::{LockingConfig, LockingWindow, TimeLock}; +use crate::error::Error; + +mod operations; +mod transactions; + +pub use transactions::{UpdateDeleteRecordWindow, UpdateDeleteTrailLock, UpdateLockingConfig, UpdateWriteLock}; + +use self::operations::LockingOps; + +/// Locking API scoped to a specific trail. +/// +/// This handle updates the trail's locking configuration and queries whether an individual record is currently +/// locked against deletion. 
+#[derive(Debug, Clone)] +pub struct TrailLocking<'a, C> { + pub(crate) client: &'a C, + pub(crate) trail_id: ObjectID, + pub(crate) selected_capability_id: Option, +} + +impl<'a, C> TrailLocking<'a, C> { + pub(crate) fn new(client: &'a C, trail_id: ObjectID, selected_capability_id: Option) -> Self { + Self { + client, + trail_id, + selected_capability_id, + } + } + + /// Uses the provided capability as the auth capability for subsequent write operations. + pub fn using_capability(mut self, capability_id: ObjectID) -> Self { + self.selected_capability_id = Some(capability_id); + self + } + + /// Replaces the full locking configuration for the trail. + /// + /// This overwrites all three locking dimensions at once: record delete window, trail delete lock, and + /// write lock. + pub fn update(&self, config: LockingConfig) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(UpdateLockingConfig::new( + self.trail_id, + owner, + config, + self.selected_capability_id, + )) + } + + /// Updates only the delete-record window. + pub fn update_delete_record_window(&self, window: LockingWindow) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(UpdateDeleteRecordWindow::new( + self.trail_id, + owner, + window, + self.selected_capability_id, + )) + } + + /// Updates only the delete-trail time lock. + pub fn update_delete_trail_lock(&self, lock: TimeLock) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(UpdateDeleteTrailLock::new( + self.trail_id, + owner, + lock, + self.selected_capability_id, + )) + } + + /// Updates only the write lock. 
+ pub fn update_write_lock(&self, lock: TimeLock) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(UpdateWriteLock::new( + self.trail_id, + owner, + lock, + self.selected_capability_id, + )) + } + + /// Returns `true` when the given record is currently locked against deletion. + /// + /// # Errors + /// + /// Returns an error if the lock state cannot be computed from the current on-chain state. + pub async fn is_record_locked(&self, sequence_number: u64) -> Result + where + C: AuditTrailReadOnly, + { + let tx = LockingOps::is_record_locked(self.client, self.trail_id, sequence_number).await?; + self.client.execute_read_only_transaction(tx).await + } +} diff --git a/audit-trail-rs/src/core/locking/operations.rs b/audit-trail-rs/src/core/locking/operations.rs new file mode 100644 index 00000000..c34b81af --- /dev/null +++ b/audit-trail-rs/src/core/locking/operations.rs @@ -0,0 +1,161 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Internal helpers that build locking-related programmable transactions. +//! +//! These helpers serialize locking values into the Move shapes used by the trail package and select the +//! corresponding locking-update permissions. + +use iota_interaction::OptionalSync; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::ProgrammableTransaction; +use product_common::core_client::CoreClientReadOnly; + +use crate::core::internal::tx; +use crate::core::types::{LockingConfig, LockingWindow, Permission, TimeLock}; +use crate::error::Error; + +/// Internal namespace for locking transaction construction. +pub(super) struct LockingOps; + +impl LockingOps { + /// Builds the `update_locking_config` call. 
+ pub(super) async fn update_locking_config( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + new_config: LockingConfig, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let tf_components_package_id = client + .tf_components_package_id() + .expect("TfComponents package ID should be present for audit trail clients"); + + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::UpdateLockingConfig, + selected_capability_id, + "update_locking_config", + |ptb, _| { + let config = new_config.to_ptb(ptb, client.package_id(), tf_components_package_id)?; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![config, clock]) + }, + ) + .await + } + + /// Builds the `update_delete_record_window` call. + pub(super) async fn update_delete_record_window( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + new_delete_record_window: LockingWindow, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::UpdateLockingConfigForDeleteRecord, + selected_capability_id, + "update_delete_record_window", + |ptb, _| { + let window = new_delete_record_window.to_ptb(ptb, client.package_id())?; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![window, clock]) + }, + ) + .await + } + + /// Builds the `update_delete_trail_lock` call. 
+ pub(super) async fn update_delete_trail_lock( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + new_delete_trail_lock: TimeLock, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let tf_components_package_id = client + .tf_components_package_id() + .expect("TfComponents package ID should be present for audit trail clients"); + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::UpdateLockingConfigForDeleteTrail, + selected_capability_id, + "update_delete_trail_lock", + |ptb, _| { + let delete_trail_lock = new_delete_trail_lock.to_ptb(ptb, tf_components_package_id)?; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![delete_trail_lock, clock]) + }, + ) + .await + } + + /// Builds the `update_write_lock` call. + pub(super) async fn update_write_lock( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + new_write_lock: TimeLock, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let tf_components_package_id = client + .tf_components_package_id() + .expect("TfComponents package ID should be present for audit trail clients"); + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::UpdateLockingConfigForWrite, + selected_capability_id, + "update_write_lock", + |ptb, _| { + let write_lock = new_write_lock.to_ptb(ptb, tf_components_package_id)?; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![write_lock, clock]) + }, + ) + .await + } + + /// Builds the read-only `is_record_locked` call. 
+ pub(super) async fn is_record_locked( + client: &C, + trail_id: ObjectID, + sequence_number: u64, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_read_only_transaction(client, trail_id, "is_record_locked", |ptb| { + let sequence_number = tx::ptb_pure(ptb, "sequence_number", sequence_number)?; + let clock = tx::get_clock_ref(ptb); + + Ok(vec![sequence_number, clock]) + }) + .await + } +} diff --git a/audit-trail-rs/src/core/locking/transactions.rs b/audit-trail-rs/src/core/locking/transactions.rs new file mode 100644 index 00000000..22e4e886 --- /dev/null +++ b/audit-trail-rs/src/core/locking/transactions.rs @@ -0,0 +1,277 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Transaction payloads for locking updates. + +use async_trait::async_trait; +use iota_interaction::OptionalSync; +use iota_interaction::rpc_types::IotaTransactionBlockEffects; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::ProgrammableTransaction; +use product_common::core_client::CoreClientReadOnly; +use product_common::transaction::transaction_builder::Transaction; +use tokio::sync::OnceCell; + +use super::operations::LockingOps; +use crate::core::types::{LockingConfig, LockingWindow, TimeLock}; +use crate::error::Error; + +/// Transaction that replaces the full locking configuration. +/// +/// This writes the full `LockingConfig` object and therefore updates all locking dimensions in one call. +#[derive(Debug, Clone)] +pub struct UpdateLockingConfig { + trail_id: ObjectID, + owner: IotaAddress, + config: LockingConfig, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl UpdateLockingConfig { + /// Creates an `UpdateLockingConfig` transaction builder payload. 
+ pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + config: LockingConfig, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + config, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + LockingOps::update_locking_config( + client, + self.trail_id, + self.owner, + self.config.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for UpdateLockingConfig { + type Error = Error; + type Output = (); + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + Ok(()) + } +} + +/// Transaction that updates the delete-record window. +/// +/// This updates only the rule that governs when individual records may be deleted. +#[derive(Debug, Clone)] +pub struct UpdateDeleteRecordWindow { + trail_id: ObjectID, + owner: IotaAddress, + window: LockingWindow, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl UpdateDeleteRecordWindow { + /// Creates an `UpdateDeleteRecordWindow` transaction builder payload. 
+ pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + window: LockingWindow, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + window, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + LockingOps::update_delete_record_window( + client, + self.trail_id, + self.owner, + self.window.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for UpdateDeleteRecordWindow { + type Error = Error; + type Output = (); + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + Ok(()) + } +} + +/// Transaction that updates the delete-trail lock. +/// +/// This updates only the time lock guarding deletion of the entire trail object. +#[derive(Debug, Clone)] +pub struct UpdateDeleteTrailLock { + trail_id: ObjectID, + owner: IotaAddress, + lock: TimeLock, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl UpdateDeleteTrailLock { + /// Creates an `UpdateDeleteTrailLock` transaction builder payload. 
+ pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + lock: TimeLock, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + lock, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + LockingOps::update_delete_trail_lock( + client, + self.trail_id, + self.owner, + self.lock.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for UpdateDeleteTrailLock { + type Error = Error; + type Output = (); + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + Ok(()) + } +} + +/// Transaction that updates the write lock. +/// +/// This updates only the time lock guarding future record writes. +#[derive(Debug, Clone)] +pub struct UpdateWriteLock { + trail_id: ObjectID, + owner: IotaAddress, + lock: TimeLock, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl UpdateWriteLock { + /// Creates an `UpdateWriteLock` transaction builder payload. 
+ pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + lock: TimeLock, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + lock, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + LockingOps::update_write_lock( + client, + self.trail_id, + self.owner, + self.lock.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for UpdateWriteLock { + type Error = Error; + type Output = (); + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + Ok(()) + } +} diff --git a/audit-trail-rs/src/core/mod.rs b/audit-trail-rs/src/core/mod.rs new file mode 100644 index 00000000..53f08953 --- /dev/null +++ b/audit-trail-rs/src/core/mod.rs @@ -0,0 +1,33 @@ +// Copyright 2020-2025 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Core handles, builders, transactions, and domain types for audit trails. +//! +//! This namespace contains the main trail-facing Rust API: +//! +//! - [`crate::core::access`] exposes role and capability management +//! - [`crate::core::builder`] configures trail creation +//! - [`crate::core::create`] contains the creation transaction types +//! - [`crate::core::locking`] manages trail locking rules +//! - [`crate::core::records`] reads and mutates trail records +//! - [`crate::core::tags`] manages the trail-owned record-tag registry +//! - [`crate::core::trail`] provides the high-level typed handle bound to a specific trail +//! 
- [`crate::core::types`] contains serializable value types shared across the crate + +/// Role and capability management APIs. +pub mod access; +/// Builder used to configure trail creation. +pub mod builder; +/// Trail-creation transaction types. +pub mod create; +pub(crate) mod internal; +/// Locking configuration APIs. +pub mod locking; +/// Record read and mutation APIs. +pub mod records; +/// Trail-scoped record-tag management APIs. +pub mod tags; +/// High-level trail handle types. +pub mod trail; +/// Shared domain and event types. +pub mod types; diff --git a/audit-trail-rs/src/core/records/mod.rs b/audit-trail-rs/src/core/records/mod.rs new file mode 100644 index 00000000..0e32ec6f --- /dev/null +++ b/audit-trail-rs/src/core/records/mod.rs @@ -0,0 +1,293 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Record read and mutation APIs for audit trails. + +use std::collections::{BTreeMap, HashMap}; + +use iota_interaction::move_core_types::annotated_value::MoveValue; +use iota_interaction::rpc_types::IotaMoveValue; +use iota_interaction::types::TypeTag; +use iota_interaction::types::base_types::ObjectID; +use iota_interaction::types::collection_types::LinkedTable; +use iota_interaction::types::dynamic_field::DynamicFieldName; +use iota_interaction::{IotaKeySignature, OptionalSync}; +use product_common::core_client::{CoreClient, CoreClientReadOnly}; +use product_common::transaction::transaction_builder::TransactionBuilder; +use secret_storage::Signer; +use serde::de::DeserializeOwned; + +use crate::core::internal::{linked_table, trail as trail_reader}; +use crate::core::trail::{AuditTrailFull, AuditTrailReadOnly}; +use crate::core::types::{Data, PaginatedRecord, Record}; +use crate::error::Error; + +mod operations; +mod transactions; + +pub use transactions::{AddRecord, DeleteRecord, DeleteRecordsBatch}; + +use self::operations::RecordsOps; + +const MAX_LIST_PAGE_LIMIT: usize = 1_000; + +/// Record API scoped to a specific 
trail. +/// +/// This handle builds record-oriented transactions and loads record data from the trail's linked-table storage. +#[derive(Debug, Clone)] +pub struct TrailRecords<'a, C, D = Data> { + pub(crate) client: &'a C, + pub(crate) trail_id: ObjectID, + pub(crate) selected_capability_id: Option, + pub(crate) _phantom: std::marker::PhantomData, +} + +impl<'a, C, D> TrailRecords<'a, C, D> { + pub(crate) fn new(client: &'a C, trail_id: ObjectID, selected_capability_id: Option) -> Self { + Self { + client, + trail_id, + selected_capability_id, + _phantom: std::marker::PhantomData, + } + } + + /// Uses the provided capability as the auth capability for subsequent write operations. + pub fn using_capability(mut self, capability_id: ObjectID) -> Self { + self.selected_capability_id = Some(capability_id); + self + } + + /// Loads a single record by sequence number. + /// + /// # Errors + /// + /// Returns an error if the record cannot be loaded or deserialized. + pub async fn get(&self, sequence_number: u64) -> Result, Error> + where + C: AuditTrailReadOnly, + D: DeserializeOwned, + { + let tx = RecordsOps::get_record(self.client, self.trail_id, sequence_number).await?; + self.client.execute_read_only_transaction(tx).await + } + + /// Builds a transaction that appends a record to the trail. + /// + /// Tagged writes must reference a tag already defined on the trail. They also require a capability whose + /// role allows both `AddRecord` and the requested tag. + pub fn add(&self, data: D, metadata: Option, tag: Option) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + D: Into, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(AddRecord::new( + self.trail_id, + owner, + data.into(), + metadata, + tag, + self.selected_capability_id, + )) + } + + /// Builds a transaction that deletes a single record. 
+ /// + /// Deletion remains subject to record locking rules and tag-based access restrictions enforced on-chain. + pub fn delete(&self, sequence_number: u64) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(DeleteRecord::new( + self.trail_id, + owner, + sequence_number, + self.selected_capability_id, + )) + } + + /// Builds a transaction that deletes up to `limit` records in one operation. + /// + /// Batch deletion removes records from the front of the trail and requires `DeleteAllRecords`. + pub fn delete_records_batch(&self, limit: u64) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(DeleteRecordsBatch::new( + self.trail_id, + owner, + limit, + self.selected_capability_id, + )) + } + + /// Placeholder for a future correction helper. + /// + /// # Errors + /// + /// Always returns [`Error::NotImplemented`]. + pub async fn correct(&self, _replaces: Vec, _data: D, _metadata: Option) -> Result<(), Error> + where + C: AuditTrailFull, + { + Err(Error::NotImplemented("TrailRecords::correct")) + } + + /// Returns the number of records currently stored in the trail. + /// + /// # Errors + /// + /// Returns an error if the count cannot be computed from the current on-chain state. + pub async fn record_count(&self) -> Result + where + C: AuditTrailReadOnly, + { + let tx = RecordsOps::record_count(self.client, self.trail_id).await?; + self.client.execute_read_only_transaction(tx).await + } + + /// Lists all records into a [`HashMap`]. + /// + /// This traverses the full on-chain linked table and can be expensive for large trails. + /// For paginated access, use [`list_page`](Self::list_page). 
+ pub async fn list(&self) -> Result>, Error> + where + C: AuditTrailReadOnly, + D: DeserializeOwned, + { + let records_table = self.load_records_table().await?; + list_linked_table::<_, Record>(self.client, &records_table, None).await + } + + /// Lists all records with a hard cap to protect against expensive traversals. + pub async fn list_with_limit(&self, max_entries: usize) -> Result>, Error> + where + C: AuditTrailReadOnly, + D: DeserializeOwned, + { + let records_table = self.load_records_table().await?; + list_linked_table::<_, Record>(self.client, &records_table, Some(max_entries)).await + } + + /// Lists one page of linked-table records starting from `cursor`. + /// + /// Pass `None` for the first page; use `next_cursor` for subsequent pages. + pub async fn list_page(&self, cursor: Option, limit: usize) -> Result, Error> + where + C: AuditTrailReadOnly, + D: DeserializeOwned, + { + if limit > MAX_LIST_PAGE_LIMIT { + return Err(Error::InvalidArgument(format!( + "page limit {limit} exceeds max supported page size {MAX_LIST_PAGE_LIMIT}" + ))); + } + + let records_table = self.load_records_table().await?; + let (records, next_cursor) = + list_linked_table_page::<_, Record>(self.client, &records_table, cursor, limit).await?; + + Ok(PaginatedRecord { + has_next_page: next_cursor.is_some(), + next_cursor, + records, + }) + } + + async fn load_records_table(&self) -> Result, Error> + where + C: AuditTrailReadOnly, + { + trail_reader::get_audit_trail(self.trail_id, self.client) + .await + .map(|on_chain_trail| on_chain_trail.records) + } +} + +async fn list_linked_table_page( + client: &C, + table: &LinkedTable, + start_key: Option, + limit: usize, +) -> Result<(BTreeMap, Option), Error> +where + C: CoreClientReadOnly + OptionalSync, + V: DeserializeOwned, +{ + // Preserve linked-table order while exposing a page as a stable Rust map keyed by sequence number. 
+ if limit == 0 { + return Ok((BTreeMap::new(), start_key.or(table.head))); + } + + let mut cursor = start_key.or(table.head); + let mut items = BTreeMap::new(); + + for _ in 0..limit { + let Some(key) = cursor else { break }; + + if items.contains_key(&key) { + return Err(Error::UnexpectedApiResponse(format!( + "cycle detected while traversing linked-table {table_id}; repeated key {key}", + table_id = table.id + ))); + } + + let node = linked_table::fetch_node::<_, u64, V>( + client, + table.id, + DynamicFieldName { + type_: TypeTag::U64, + value: IotaMoveValue::from(MoveValue::U64(key)).to_json_value(), + }, + ) + .await?; + + cursor = node.next; + items.insert(key, node.value); + } + + Ok((items, cursor)) +} + +async fn list_linked_table( + client: &C, + table: &LinkedTable, + max_entries: Option, +) -> Result, Error> +where + C: CoreClientReadOnly + OptionalSync, + V: DeserializeOwned, +{ + // Full traversal is only allowed when the caller explicitly accepts the current linked-table size. 
+ let expected = table.size as usize; + let cap = max_entries.unwrap_or(expected); + + if expected > cap { + return Err(Error::InvalidArgument(format!( + "linked-table size {expected} exceeds max_entries {cap}" + ))); + } + + let (entries, next_key) = list_linked_table_page(client, table, None, expected).await?; + + if entries.len() != expected { + return Err(Error::UnexpectedApiResponse(format!( + "linked-table traversal mismatch; expected {expected} entries, got {}", + entries.len() + ))); + } + + if next_key.is_some() { + return Err(Error::UnexpectedApiResponse(format!( + "linked-table traversal has extra entries beyond declared size {expected}" + ))); + } + + Ok(entries.into_iter().collect()) +} diff --git a/audit-trail-rs/src/core/records/operations.rs b/audit-trail-rs/src/core/records/operations.rs new file mode 100644 index 00000000..781e6909 --- /dev/null +++ b/audit-trail-rs/src/core/records/operations.rs @@ -0,0 +1,166 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Internal record-operation helpers that build trail-scoped programmable transactions. +//! +//! These helpers enforce the Rust-side preflight checks around record tags and then encode the exact Move call +//! arguments expected by the trail package. + +use iota_interaction::OptionalSync; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::ProgrammableTransaction; +use product_common::core_client::CoreClientReadOnly; + +use crate::core::internal::capability::find_capable_cap_for_tag; +use crate::core::internal::{trail as trail_reader, tx}; +use crate::core::types::{Data, Permission}; +use crate::error::Error; + +/// Internal namespace for record-related transaction construction. +pub(super) struct RecordsOps; + +impl RecordsOps { + /// Builds the `add_record` call. 
+ /// + /// Tagged writes are prevalidated against the trail tag registry and require a capability whose role allows + /// both `AddRecord` and the requested tag. + pub(super) async fn add_record( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + data: Data, + record_metadata: Option, + record_tag: Option, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let package_id = client.package_id(); + if let Some(tag) = record_tag.clone() { + let trail = trail_reader::get_audit_trail(trail_id, client).await?; + if !trail.tags.contains_key(&tag) { + return Err(Error::InvalidArgument(format!( + "record tag '{tag}' is not defined for trail {trail_id}" + ))); + } + let cap_ref = if let Some(capability_id) = selected_capability_id { + tx::get_object_ref_by_id(client, &capability_id).await? + } else { + find_capable_cap_for_tag(client, owner, trail_id, &trail, &tag).await? + }; + + tx::build_trail_transaction_with_cap_ref(client, trail_id, cap_ref, "add_record", |ptb, trail_tag| { + data.ensure_matches_tag(trail_tag, package_id)?; + + let data_arg = data.into_ptb(ptb, package_id)?; + let metadata = tx::ptb_pure(ptb, "record_metadata", record_metadata)?; + let tag_arg = tx::ptb_pure(ptb, "record_tag", Some(tag))?; + let clock = tx::get_clock_ref(ptb); + Ok(vec![data_arg, metadata, tag_arg, clock]) + }) + .await + } else { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::AddRecord, + selected_capability_id, + "add_record", + |ptb, trail_tag| { + data.ensure_matches_tag(trail_tag, package_id)?; + + let data_arg = data.into_ptb(ptb, package_id)?; + let metadata = tx::ptb_pure(ptb, "record_metadata", record_metadata)?; + let tag = tx::ptb_pure(ptb, "record_tag", Option::::None)?; + let clock = tx::get_clock_ref(ptb); + Ok(vec![data_arg, metadata, tag, clock]) + }, + ) + .await + } + } + + /// Builds the `delete_record` call. 
+ /// + /// Authorization and locking remain enforced by the Move entry point. + pub(super) async fn delete_record( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + sequence_number: u64, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::DeleteRecord, + selected_capability_id, + "delete_record", + |ptb, _| { + let seq = tx::ptb_pure(ptb, "sequence_number", sequence_number)?; + let clock = tx::get_clock_ref(ptb); + Ok(vec![seq, clock]) + }, + ) + .await + } + + /// Builds the `delete_records_batch` call. + /// + /// Batch deletion requires `DeleteAllRecords` and deletes from the front of the trail. + pub(super) async fn delete_records_batch( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + limit: u64, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::DeleteAllRecords, + selected_capability_id, + "delete_records_batch", + |ptb, _| { + let limit_arg = tx::ptb_pure(ptb, "limit", limit)?; + let clock = tx::get_clock_ref(ptb); + Ok(vec![limit_arg, clock]) + }, + ) + .await + } + + /// Builds the read-only `get_record` call. + pub(super) async fn get_record( + client: &C, + trail_id: ObjectID, + sequence_number: u64, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_read_only_transaction(client, trail_id, "get_record", |ptb| { + let seq = tx::ptb_pure(ptb, "sequence_number", sequence_number)?; + Ok(vec![seq]) + }) + .await + } + + /// Builds the read-only `record_count` call. 
+ pub(super) async fn record_count(client: &C, trail_id: ObjectID) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_read_only_transaction(client, trail_id, "record_count", |_| Ok(vec![])).await + } +} diff --git a/audit-trail-rs/src/core/records/transactions.rs b/audit-trail-rs/src/core/records/transactions.rs new file mode 100644 index 00000000..ab4c4a63 --- /dev/null +++ b/audit-trail-rs/src/core/records/transactions.rs @@ -0,0 +1,295 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Transaction payloads for record writes and deletions. +//! +//! These types cache the generated programmable transaction, delegate PTB construction to +//! [`super::operations::RecordsOps`], and decode record events into typed Rust outputs. + +use async_trait::async_trait; +use iota_interaction::OptionalSync; +use iota_interaction::rpc_types::{IotaTransactionBlockEffects, IotaTransactionBlockEvents}; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::ProgrammableTransaction; +use product_common::core_client::CoreClientReadOnly; +use product_common::transaction::transaction_builder::Transaction; +use tokio::sync::OnceCell; + +use super::operations::RecordsOps; +use crate::core::types::{Data, Event, RecordAdded, RecordDeleted}; +use crate::error::Error; + +// ===== AddRecord ===== + +/// Transaction that appends a record to a trail. +/// +/// Tagged writes require the tag to exist in the trail registry and a capability whose role explicitly allows +/// that tag in addition to `AddRecord`. +#[derive(Debug, Clone)] +pub struct AddRecord { + /// Trail object ID that will receive the record. + pub trail_id: ObjectID, + /// Address authorizing the write. + pub owner: IotaAddress, + /// Record payload to append. + pub data: Data, + /// Optional application-defined metadata. + pub metadata: Option, + /// Optional trail-owned tag to attach to the record. 
+ pub tag: Option, + /// Explicit capability to use instead of auto-selecting one from the owner's wallet. + pub selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl AddRecord { + /// Creates an `AddRecord` transaction builder payload. + pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + data: Data, + metadata: Option, + tag: Option, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + data, + metadata, + tag, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + RecordsOps::add_record( + client, + self.trail_id, + self.owner, + self.data.clone(), + self.metadata.clone(), + self.tag.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for AddRecord { + type Error = Error; + type Output = RecordAdded; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| serde_json::from_value::>(data.parsed_json.clone()).ok()) + .ok_or_else(|| Error::UnexpectedApiResponse("RecordAdded event not found".to_string()))?; + + Ok(event.data) + } + + async fn apply(mut self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} + +// ===== DeleteRecord ===== + +/// Transaction that deletes a single record. 
+/// +/// This uses the single-record delete entry point, which remains subject to record-locking and tag-aware +/// authorization checks. +#[derive(Debug, Clone)] +pub struct DeleteRecord { + /// Trail object ID containing the record. + pub trail_id: ObjectID, + /// Address authorizing the deletion. + pub owner: IotaAddress, + /// Sequence number of the record to delete. + pub sequence_number: u64, + /// Explicit capability to use instead of auto-selecting one from the owner's wallet. + pub selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl DeleteRecord { + /// Creates a `DeleteRecord` transaction builder payload. + pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + sequence_number: u64, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + sequence_number, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + RecordsOps::delete_record( + client, + self.trail_id, + self.owner, + self.sequence_number, + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for DeleteRecord { + type Error = Error; + type Output = RecordDeleted; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + mut self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| serde_json::from_value::>(data.parsed_json.clone()).ok()) + .ok_or_else(|| Error::UnexpectedApiResponse("RecordDeleted event not found".to_string()))?; + + Ok(event.data) + } + + async fn apply(mut self, 
_: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} + +// ===== DeleteRecordsBatch ===== + +/// Transaction that deletes multiple records in a batch operation. +/// +/// The Move entry point deletes records from the front of the trail up to `limit` and reports the number of +/// deleted records through the emitted `RecordDeleted` events. +#[derive(Debug, Clone)] +pub struct DeleteRecordsBatch { + /// Trail object ID containing the records. + pub trail_id: ObjectID, + /// Address authorizing the deletion. + pub owner: IotaAddress, + /// Maximum number of records to delete in this batch. + pub limit: u64, + /// Explicit capability to use instead of auto-selecting one from the owner's wallet. + pub selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl DeleteRecordsBatch { + /// Creates a `DeleteRecordsBatch` transaction builder payload. + pub fn new(trail_id: ObjectID, owner: IotaAddress, limit: u64, selected_capability_id: Option) -> Self { + Self { + trail_id, + owner, + limit, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + RecordsOps::delete_records_batch( + client, + self.trail_id, + self.owner, + self.limit, + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for DeleteRecordsBatch { + type Error = Error; + type Output = u64; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let 
deleted = events + .data + .iter() + .filter_map(|data| serde_json::from_value::>(data.parsed_json.clone()).ok()) + .count() as u64; + + Ok(deleted) + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} diff --git a/audit-trail-rs/src/core/tags/mod.rs b/audit-trail-rs/src/core/tags/mod.rs new file mode 100644 index 00000000..c1862a79 --- /dev/null +++ b/audit-trail-rs/src/core/tags/mod.rs @@ -0,0 +1,77 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Record-tag registry APIs for audit trails. + +use iota_interaction::types::base_types::ObjectID; +use iota_interaction::{IotaKeySignature, OptionalSync}; +use product_common::core_client::CoreClient; +use product_common::transaction::transaction_builder::TransactionBuilder; +use secret_storage::Signer; + +use crate::core::trail::AuditTrailFull; + +mod operations; +mod transactions; + +pub use transactions::{AddRecordTag, RemoveRecordTag}; + +/// Tag-registry API scoped to a specific trail. +/// +/// The registry defines the canonical set of tags that records and role-tag restrictions may reference. +#[derive(Debug, Clone)] +pub struct TrailTags<'a, C> { + pub(crate) client: &'a C, + pub(crate) trail_id: ObjectID, + pub(crate) selected_capability_id: Option, +} + +impl<'a, C> TrailTags<'a, C> { + pub(crate) fn new(client: &'a C, trail_id: ObjectID, selected_capability_id: Option) -> Self { + Self { + client, + trail_id, + selected_capability_id, + } + } + + /// Uses the provided capability as the auth capability for subsequent write operations. + pub fn using_capability(mut self, capability_id: ObjectID) -> Self { + self.selected_capability_id = Some(capability_id); + self + } + + /// Adds a tag to the trail-owned record-tag registry. + /// + /// Added tags become available to future tagged record writes and role-tag restrictions. 
+ pub fn add(&self, tag: impl Into) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(AddRecordTag::new( + self.trail_id, + owner, + tag.into(), + self.selected_capability_id, + )) + } + + /// Removes a tag from the trail-owned record-tag registry. + /// + /// Removal fails on-chain while the tag is still referenced by existing records or role-tag policies. + pub fn remove(&self, tag: impl Into) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(RemoveRecordTag::new( + self.trail_id, + owner, + tag.into(), + self.selected_capability_id, + )) + } +} diff --git a/audit-trail-rs/src/core/tags/operations.rs b/audit-trail-rs/src/core/tags/operations.rs new file mode 100644 index 00000000..ba86c650 --- /dev/null +++ b/audit-trail-rs/src/core/tags/operations.rs @@ -0,0 +1,75 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Internal helpers that build record-tag registry transactions. +//! +//! These helpers encode updates to the trail-owned tag registry and select the corresponding tag-management +//! permissions. + +use iota_interaction::OptionalSync; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::ProgrammableTransaction; +use product_common::core_client::CoreClientReadOnly; + +use crate::core::internal::tx; +use crate::core::types::Permission; +use crate::error::Error; + +/// Internal namespace for tag-registry transaction construction. +pub(super) struct TagsOps; + +impl TagsOps { + /// Builds the `add_record_tag` call. 
+ pub(super) async fn add_record_tag( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + tag: String, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::AddRecordTags, + selected_capability_id, + "add_record_tag", + |ptb, _| { + let tag_arg = tx::ptb_pure(ptb, "tag", tag)?; + let clock = tx::get_clock_ref(ptb); + Ok(vec![tag_arg, clock]) + }, + ) + .await + } + + /// Builds the `remove_record_tag` call. + pub(super) async fn remove_record_tag( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + tag: String, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::DeleteRecordTags, + selected_capability_id, + "remove_record_tag", + |ptb, _| { + let tag_arg = tx::ptb_pure(ptb, "tag", tag)?; + let clock = tx::get_clock_ref(ptb); + Ok(vec![tag_arg, clock]) + }, + ) + .await + } +} diff --git a/audit-trail-rs/src/core/tags/transactions.rs b/audit-trail-rs/src/core/tags/transactions.rs new file mode 100644 index 00000000..171074a0 --- /dev/null +++ b/audit-trail-rs/src/core/tags/transactions.rs @@ -0,0 +1,136 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Transaction payloads for tag-registry updates. + +use async_trait::async_trait; +use iota_interaction::OptionalSync; +use iota_interaction::rpc_types::IotaTransactionBlockEffects; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::ProgrammableTransaction; +use product_common::core_client::CoreClientReadOnly; +use product_common::transaction::transaction_builder::Transaction; +use tokio::sync::OnceCell; + +use super::operations::TagsOps; +use crate::error::Error; + +/// Transaction that adds a record tag to the trail registry. 
+/// +/// This extends the canonical tag registry owned by the trail. +#[derive(Debug, Clone)] +pub struct AddRecordTag { + trail_id: ObjectID, + owner: IotaAddress, + tag: String, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl AddRecordTag { + /// Creates an `AddRecordTag` transaction builder payload. + pub fn new(trail_id: ObjectID, owner: IotaAddress, tag: String, selected_capability_id: Option) -> Self { + Self { + trail_id, + owner, + tag, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + TagsOps::add_record_tag( + client, + self.trail_id, + self.owner, + self.tag.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for AddRecordTag { + type Error = Error; + type Output = (); + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + Ok(()) + } +} + +/// Transaction that removes a record tag from the trail registry. +/// +/// Removal only succeeds when the tag is no longer used by records or role-tag restrictions. +#[derive(Debug, Clone)] +pub struct RemoveRecordTag { + trail_id: ObjectID, + owner: IotaAddress, + tag: String, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl RemoveRecordTag { + /// Creates a `RemoveRecordTag` transaction builder payload. 
+ pub fn new(trail_id: ObjectID, owner: IotaAddress, tag: String, selected_capability_id: Option) -> Self { + Self { + trail_id, + owner, + tag, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + TagsOps::remove_record_tag( + client, + self.trail_id, + self.owner, + self.tag.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for RemoveRecordTag { + type Error = Error; + type Output = (); + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + Ok(()) + } +} diff --git a/audit-trail-rs/src/core/trail.rs b/audit-trail-rs/src/core/trail.rs new file mode 100644 index 00000000..8e034a50 --- /dev/null +++ b/audit-trail-rs/src/core/trail.rs @@ -0,0 +1,143 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! High-level trail handles and trail-scoped transactions. 
+ +use iota_interaction::types::base_types::ObjectID; +use iota_interaction::types::transaction::ProgrammableTransaction; +use iota_interaction::{IotaKeySignature, OptionalSync}; +use product_common::core_client::{CoreClient, CoreClientReadOnly}; +use product_common::transaction::transaction_builder::TransactionBuilder; +use secret_storage::Signer; +use serde::de::DeserializeOwned; + +use crate::core::access::TrailAccess; +use crate::core::internal::trail as trail_reader; +use crate::core::locking::TrailLocking; +use crate::core::records::TrailRecords; +use crate::core::tags::TrailTags; +use crate::core::types::{Data, OnChainAuditTrail}; +use crate::error::Error; + +mod operations; +mod transactions; + +pub use transactions::{DeleteAuditTrail, Migrate, UpdateMetadata}; + +/// Marker trait for read-only audit-trail clients. +#[doc(hidden)] +#[cfg_attr(not(feature = "send-sync"), async_trait::async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait::async_trait)] +pub trait AuditTrailReadOnly: CoreClientReadOnly + OptionalSync { + /// Executes a read-only programmable transaction and decodes the first return value. + async fn execute_read_only_transaction(&self, tx: ProgrammableTransaction) + -> Result; +} + +/// Marker trait for full audit-trail clients. +#[doc(hidden)] +pub trait AuditTrailFull: AuditTrailReadOnly {} + +/// A typed handle bound to one trail ID and one client. +/// +/// This is the main trail-scoped entry point. It keeps the trail identity together with the client so record, +/// locking, access, tag, migration, and metadata operations all share one typed handle. 
+#[derive(Debug, Clone)] +pub struct AuditTrailHandle<'a, C> { + pub(crate) client: &'a C, + pub(crate) trail_id: ObjectID, + pub(crate) selected_capability_id: Option, +} + +impl<'a, C> AuditTrailHandle<'a, C> { + pub(crate) fn new(client: &'a C, trail_id: ObjectID) -> Self { + Self { + client, + trail_id, + selected_capability_id: None, + } + } + + /// Uses the provided capability as the auth capability for subsequent write operations. + pub fn using_capability(mut self, capability_id: ObjectID) -> Self { + self.selected_capability_id = Some(capability_id); + self + } + + /// Loads the full on-chain audit trail object. + /// + /// Each call fetches a fresh snapshot from chain state rather than reusing cached client-side data. + pub async fn get(&self) -> Result + where + C: AuditTrailReadOnly, + { + trail_reader::get_audit_trail(self.trail_id, self.client).await + } + + /// Updates the trail's mutable metadata field. + /// + /// Passing `None` clears the field on-chain. + pub fn update_metadata(&self, metadata: Option) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(UpdateMetadata::new( + self.trail_id, + owner, + metadata, + self.selected_capability_id, + )) + } + + /// Migrates the trail to the latest package version supported by this crate. + pub fn migrate(&self) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(Migrate::new(self.trail_id, owner, self.selected_capability_id)) + } + + /// Deletes the trail object. + /// + /// Deletion requires the trail to be empty and to satisfy the trail-delete lock rules. 
+ pub fn delete_audit_trail(&self) -> TransactionBuilder + where + C: AuditTrailFull + CoreClient, + S: Signer + OptionalSync, + { + let owner = self.client.sender_address(); + TransactionBuilder::new(DeleteAuditTrail::new(self.trail_id, owner, self.selected_capability_id)) + } + + /// Returns the record API scoped to this trail. + /// + /// Use this for record reads, appends, and deletions. + pub fn records(&self) -> TrailRecords<'a, C, Data> { + TrailRecords::new(self.client, self.trail_id, self.selected_capability_id) + } + + /// Returns the locking API scoped to this trail. + /// + /// Use this for inspecting lock state and updating locking rules. + pub fn locking(&self) -> TrailLocking<'a, C> { + TrailLocking::new(self.client, self.trail_id, self.selected_capability_id) + } + + /// Returns the access-control API scoped to this trail. + /// + /// Use this for roles, capabilities, and access-policy updates. + pub fn access(&self) -> TrailAccess<'a, C> { + TrailAccess::new(self.client, self.trail_id, self.selected_capability_id) + } + + /// Returns the tag-registry API scoped to this trail. + /// + /// Use this for managing the canonical tag registry that record writes and role tags must reference. + pub fn tags(&self) -> TrailTags<'a, C> { + TrailTags::new(self.client, self.trail_id, self.selected_capability_id) + } +} diff --git a/audit-trail-rs/src/core/trail/operations.rs b/audit-trail-rs/src/core/trail/operations.rs new file mode 100644 index 00000000..e3ebfa0c --- /dev/null +++ b/audit-trail-rs/src/core/trail/operations.rs @@ -0,0 +1,98 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Internal helpers that build trail-level programmable transactions. +//! +//! These helpers select the required trail-level permission and encode the corresponding metadata, migration, +//! and deletion calls. 
+ +use iota_interaction::OptionalSync; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::ProgrammableTransaction; +use product_common::core_client::CoreClientReadOnly; + +use crate::core::internal::tx; +use crate::core::types::Permission; +use crate::error::Error; + +/// Internal namespace for trail-level transaction construction. +pub(super) struct TrailOps; + +impl TrailOps { + /// Builds the `migrate` call. + pub(super) async fn migrate( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::Migrate, + selected_capability_id, + "migrate", + |ptb, _| { + let clock = tx::get_clock_ref(ptb); + Ok(vec![clock]) + }, + ) + .await + } + + /// Builds the `update_metadata` call. + pub(super) async fn update_metadata( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + metadata: Option, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::UpdateMetadata, + selected_capability_id, + "update_metadata", + |ptb, _| { + let metadata_arg = tx::ptb_pure(ptb, "new_metadata", metadata)?; + let clock = tx::get_clock_ref(ptb); + Ok(vec![metadata_arg, clock]) + }, + ) + .await + } + + /// Builds the `delete_audit_trail` call. 
+ pub(super) async fn delete_audit_trail( + client: &C, + trail_id: ObjectID, + owner: IotaAddress, + selected_capability_id: Option, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + tx::build_trail_transaction( + client, + trail_id, + owner, + Permission::DeleteAuditTrail, + selected_capability_id, + "delete_audit_trail", + |ptb, _| { + let clock = tx::get_clock_ref(ptb); + Ok(vec![clock]) + }, + ) + .await + } +} diff --git a/audit-trail-rs/src/core/trail/transactions.rs b/audit-trail-rs/src/core/trail/transactions.rs new file mode 100644 index 00000000..8148b385 --- /dev/null +++ b/audit-trail-rs/src/core/trail/transactions.rs @@ -0,0 +1,204 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Transaction payloads for trail-level metadata, migration, and deletion operations. + +use async_trait::async_trait; +use iota_interaction::OptionalSync; +use iota_interaction::rpc_types::{IotaTransactionBlockEffects, IotaTransactionBlockEvents}; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::transaction::ProgrammableTransaction; +use product_common::core_client::CoreClientReadOnly; +use product_common::transaction::transaction_builder::Transaction; +use tokio::sync::OnceCell; + +use super::operations::TrailOps; +use crate::core::types::{AuditTrailDeleted, Event}; +use crate::error::Error; + +/// Transaction that migrates a trail to the latest package version supported by this crate. +/// +/// This requires `Migrate` on the trail and succeeds only when the on-chain package version is older than the +/// current supported version. +#[derive(Debug, Clone)] +pub struct Migrate { + trail_id: ObjectID, + owner: IotaAddress, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl Migrate { + /// Creates a `Migrate` transaction builder payload. 
+ pub fn new(trail_id: ObjectID, owner: IotaAddress, selected_capability_id: Option) -> Self { + Self { + trail_id, + owner, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + TrailOps::migrate(client, self.trail_id, self.owner, self.selected_capability_id).await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for Migrate { + type Error = Error; + type Output = (); + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + Ok(()) + } +} + +/// Transaction that updates mutable trail metadata. +/// +/// Passing `None` clears the mutable metadata field. +#[derive(Debug, Clone)] +pub struct UpdateMetadata { + trail_id: ObjectID, + owner: IotaAddress, + metadata: Option, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl UpdateMetadata { + /// Creates an `UpdateMetadata` transaction builder payload. 
+ pub fn new( + trail_id: ObjectID, + owner: IotaAddress, + metadata: Option, + selected_capability_id: Option, + ) -> Self { + Self { + trail_id, + owner, + metadata, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + TrailOps::update_metadata( + client, + self.trail_id, + self.owner, + self.metadata.clone(), + self.selected_capability_id, + ) + .await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for UpdateMetadata { + type Error = Error; + type Output = (); + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + Ok(()) + } +} + +/// Transaction that deletes an empty trail. +/// +/// Deletion still depends on the trail-delete permission, an empty record set, and the configured trail-delete +/// lock. +#[derive(Debug, Clone)] +pub struct DeleteAuditTrail { + trail_id: ObjectID, + owner: IotaAddress, + selected_capability_id: Option, + cached_ptb: OnceCell, +} + +impl DeleteAuditTrail { + /// Creates a `DeleteAuditTrail` transaction builder payload. 
+ pub fn new(trail_id: ObjectID, owner: IotaAddress, selected_capability_id: Option) -> Self { + Self { + trail_id, + owner, + selected_capability_id, + cached_ptb: OnceCell::new(), + } + } + + async fn make_ptb(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + TrailOps::delete_audit_trail(client, self.trail_id, self.owner, self.selected_capability_id).await + } +} + +#[cfg_attr(not(feature = "send-sync"), async_trait(?Send))] +#[cfg_attr(feature = "send-sync", async_trait)] +impl Transaction for DeleteAuditTrail { + type Error = Error; + type Output = AuditTrailDeleted; + + async fn build_programmable_transaction(&self, client: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + self.cached_ptb.get_or_try_init(|| self.make_ptb(client)).await.cloned() + } + + async fn apply_with_events( + self, + _: &mut IotaTransactionBlockEffects, + events: &mut IotaTransactionBlockEvents, + _: &C, + ) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + let event = events + .data + .iter() + .find_map(|data| serde_json::from_value::>(data.parsed_json.clone()).ok()) + .ok_or_else(|| Error::UnexpectedApiResponse("Expected AuditTrailDeleted event not found".to_string()))?; + + Ok(event.data) + } + + async fn apply(self, _: &mut IotaTransactionBlockEffects, _: &C) -> Result + where + C: CoreClientReadOnly + OptionalSync, + { + unreachable!() + } +} diff --git a/audit-trail-rs/src/core/types/RoleMap-README.md b/audit-trail-rs/src/core/types/RoleMap-README.md new file mode 100644 index 00000000..4fec6838 --- /dev/null +++ b/audit-trail-rs/src/core/types/RoleMap-README.md @@ -0,0 +1,529 @@ +# Role-Based Access Control for Audit Trails + +Audit trails provide an access control registry (a.k.a. `RoleMap`), defining who may perform which +operations by combining two primitives: + +- **Roles** — named permission sets stored on the trail. +- **Capabilities** — on-chain objects held by users, each linked to one role. 
+ +Every operation on a trail (adding a record, deleting a role, revoking a +capability, …) requires the caller to present a `Capability`. The audit trail +validates the capability before allowing the operation. + +--- + +## Concepts + +### Roles + +A role is a named and configurable set of `Permission` values, for example: + +| Role name | Permissions | +| :------------- | :------------------------------------------------------------------------------------------------------- | +| `Admin` | AddRoles, UpdateRoles, DeleteRoles, AddCapabilities, RevokeCapabilities, AddRecordTags, DeleteRecordTags | +| `RecordAdmin` | AddRecord, DeleteRecord, CorrectRecord | +| `LockingAdmin` | UpdateLockingConfig (and sub-variants) | +| `Auditor` | _(read-only — no write permissions needed)_ | + +Roles are identified by a unique string name within the trail. Multiple +capabilities can be issued for the same role, to allow users or services to share +that access level. + +Roles may optionally carry a `RoleTags` allowlist (see [Record Tags](#record-tags-and-roletags)). + +### Capabilities + +A `Capability` is an on-chain object owned by a wallet address. It records: + +| Field | Meaning | +| :------------ | :----------------------------------------------------------------- | +| `target_key` | The `ObjectID` of the trail this capability is valid for. | +| `role` | The role name — determines which permissions the holder has. | +| `issued_to` | Optional address binding; only that address may present the cap. | +| `valid_from` | Optional Unix-ms timestamp before which the cap is not yet active. | +| `valid_until` | Optional Unix-ms timestamp after which the cap expires. | + +Possessing a capability does **not** automatically grant access. The audit trail +validates all fields above on every call before the operation is executed. + +### The Admin Role + +When a trail is created, the access control registry is initialized with exactly one role — +the **initial admin role** (named `"Admin"`). 
managed through dedicated entry points (`revoke_initial_admin_capability`,
The holder can no longer use them.
The same checks apply when a tagged record is corrected or deleted.
A role without any `RoleTags` can operate on any untagged record, as long
If the role was updated after the capability was +issued and the required permission was removed, existing capabilities for +that role will start failing this check. + +### 4 — `ECapabilityHasBeenRevoked` + +The capability's ID must **not** appear in the `revoked_capabilities` +denylist. A capability that has been revoked via `revoke_capability` (or +`revoke_initial_admin_capability`) is permanently rejected, even if it is +still within its validity window. See +[Managing Revoked Capabilities](#managing-revoked-capabilities) for details. + +### 5 — `ECapabilityTimeConstraintsNotMet` + +This check only runs when the capability has a `valid_from` and/or +`valid_until` field set. The current on-chain clock time must satisfy: + +- `valid_from`: current time **>=** `valid_from` (the capability is not yet + active before this timestamp). +- `valid_until`: current time **<=** `valid_until` (the capability has + expired after this timestamp). + +If neither field is set, this check is skipped entirely and the capability is +considered valid at any point in time. + +### 6 — `ECapabilityIssuedToMismatch` + +This check only runs when the capability has a non-empty `issued_to` field. +The address of the transaction sender must match the `issued_to` address +stored in the capability. This binds the capability to a specific wallet, +preventing it from being used by anyone else even if the on-chain object is +transferred. + +If `issued_to` is not set, any holder of the capability object may use it. + +### 7 — `ERecordTagNotDefined` / `ERecordTagNotAllowed` + +This check is performed by the audit trail **after** all `RoleMap` checks +(1–6) have passed. It only applies to record operations (add, correct, +delete) that involve a tagged record. + +When a record carries a tag, two additional conditions must hold: + +1. The tag must be registered in the trail's **tag registry** + (`ERecordTagNotDefined`). +2. 
During every call to an access-restricted audit trail function, the internally
+This is important for deployments that issue large numbers of capabilities over
+time.
+
+Each denylist entry maps a revoked capability ID to a `valid_until` timestamp
+(Unix milliseconds). If the revoked capability had no `valid_until` field, the
+stored value is `0`, which signals "no expiry — keep in the denylist
+indefinitely".
+
+### How Time-Restricted Capabilities Affect Management
+
+Capabilities can carry optional `valid_from` and `valid_until` timestamps.
+These fields are enforced by the internally used `assert_capability_valid`:
+a capability whose
+time window has not yet started or has already passed is rejected with
+`ECapabilityTimeConstraintsNotMet`, regardless of whether it appears in the
+denylist.
+
+This has an important consequence for revocation: **once a capability's
+`valid_until` timestamp has passed, the capability has naturally expired and
+can no longer be used — even if it was never explicitly revoked.** Its
+denylist entry therefore becomes redundant and can be safely removed.
+
+The `cleanup_revoked_capabilities` function exploits this property. It
+iterates through the denylist and removes every entry whose stored
+`valid_until` value is **non-zero** and **less than** the current clock time.
+Entries with `valid_until == 0` (capabilities that were issued without an
+expiry or where the revoker did not supply the `valid_until` value during the
+`revoke_capability` call) are kept because the corresponding capabilities never
+expire on their own.
+
+**Best practice:** always set a `valid_until` when issuing capabilities.
+Even a generous validity window (e.g. one year) ensures that the
+corresponding denylist entry can be automatically cleaned up after the
+capability expires, rather than occupying storage indefinitely.
+
+### Off-Chain Tracking Requirements
+
+Because the audit trail uses a denylist and not an allowlist, it does **not**
+maintain an on-chain registry of all issued capabilities.
Tracking every +issued capability on-chain would increase storage costs and slow down +validity checks. + +This design shifts the bookkeeping responsibility to the user: + +1. **Maintain an off-chain registry of every issued capability**, storing at + least the capability `ID`, the `role` it was issued for, the `issued_to` + address (if any), and the `valid_from` / `valid_until` timestamps. +2. **When revoking**, supply the correct capability ID and its `valid_until` + value (via the `cap_to_revoke_valid_until` parameter). The + `revoke_capability` function does **not** verify that the supplied ID + actually refers to a real, previously-issued capability — if you pass a + random ID, it will be silently added to the denylist without error. + Accurate off-chain records are therefore essential. +3. **Track which capabilities have been revoked or destroyed** so you do not + attempt to revoke the same capability twice (which would abort with + `ECapabilityToRevokeHasAlreadyBeenRevoked`). + +The off-chain capability registry can also be used to manage capability renewal: +when a capability is about to expire, a new capability is automatically issued for the +holder with an updated validity window. The old capability can be revoked or destroyed +at the same time. This process can be fully automated by a background service that +monitors capability expirations and performs renewals as needed. + +For deployments that only issue a small number of capabilities, a simplified +approach is acceptable: track only the issued capability IDs and pass +`None` for `cap_to_revoke_valid_until` when revoking capabilities using the +`revoke_capability` function. The trade-off is that +those denylist entries will never be automatically cleaned up — they persist +until the capability object is explicitly destroyed. + +### Cleaning Up the Denylist + +Over time the denylist can accumulate entries for capabilities that have +already naturally expired. 
The `cleanup_revoked_capabilities` function
+removes these stale entries:
+
+1. It walks through every entry in the `revoked_capabilities` linked table.
+2. For each entry with a **non-zero** `valid_until` value that is **less than**
+   the current on-chain clock time, the entry is removed.
+3. Entries with `valid_until == 0` are skipped — they represent capabilities
+   that have no natural expiry and must remain on the denylist until the
+   capability object itself is destroyed (via `destroy_capability`).
+
+The cleanup operation requires a capability with the `RevokeCapabilities`
+permission.
+
+**Recommendations for keeping the denylist short:**
+
+- When revoking a capability, always provide a `cap_to_revoke_valid_until`
+  value that matches the revoked capability's `valid_until`, so that its
+  denylist entry becomes eligible for automatic cleanup.
+- Call `cleanup_revoked_capabilities` periodically (e.g. as a maintenance
+  transaction) to reclaim storage.
+- When a revoked capability is no longer needed at all, have the holder call
+  `destroy_capability` to delete the on-chain object. Destroying a
+  capability also removes it from the denylist if it was listed there.
+ +--- + +## Permission Sets + +`PermissionSet` provides convenience constructors for common role profiles: + +| Constructor | Permissions | +| :----------------------------- | :------------------------------------------------------------------------------------------------------- | +| `admin_permissions()` | AddRoles, UpdateRoles, DeleteRoles, AddCapabilities, RevokeCapabilities, AddRecordTags, DeleteRecordTags | +| `record_admin_permissions()` | AddRecord, DeleteRecord, CorrectRecord | +| `locking_admin_permissions()` | UpdateLockingConfig (and all sub-variants) | +| `cap_admin_permissions()` | AddCapabilities, RevokeCapabilities | +| `tag_admin_permissions()` | AddRecordTags, DeleteRecordTags | +| `metadata_admin_permissions()` | UpdateMetadata, DeleteMetadata | + +Please note: + +- These constructors are just for convenience and do not enforce any invariants. + For example, you could (not recommended) create a role named `NormalUser` with + `PermissionSet::admin_permissions()`. +- You can create custom permission sets by constructing a `PermissionSet` with + an arbitrary combination of permissions. 
diff --git a/audit-trail-rs/src/core/types/audit_trail.rs b/audit-trail-rs/src/core/types/audit_trail.rs new file mode 100644 index 00000000..962a3218 --- /dev/null +++ b/audit-trail-rs/src/core/types/audit_trail.rs @@ -0,0 +1,119 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashMap; +use std::str::FromStr; + +use iota_interaction::ident_str; +use iota_interaction::types::TypeTag; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::collection_types::LinkedTable; +use iota_interaction::types::id::UID; +use iota_interaction::types::programmable_transaction_builder::ProgrammableTransactionBuilder as Ptb; +use iota_interaction::types::transaction::Argument; +use serde::{Deserialize, Serialize}; + +use super::locking::LockingConfig; +use super::role_map::RoleMap; +use crate::core::internal::move_collections::deserialize_vec_map; +use crate::core::internal::tx; +use crate::error::Error; + +/// Registry of trail-owned record tags. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct TagRegistry { + /// Mapping from tag name to usage count. + #[serde(deserialize_with = "deserialize_vec_map")] + pub tag_map: HashMap, +} + +impl TagRegistry { + /// Returns the number of registered tags. + pub fn len(&self) -> usize { + self.tag_map.len() + } + + /// Returns `true` when no tags are registered. + pub fn is_empty(&self) -> bool { + self.tag_map.is_empty() + } + + /// Returns `true` when the registry contains the given tag. + pub fn contains_key(&self, tag: &str) -> bool { + self.tag_map.contains_key(tag) + } + + /// Returns the usage count for a tag. + pub fn get(&self, tag: &str) -> Option<&u64> { + self.tag_map.get(tag) + } + + /// Iterates over tag names and usage counts. + pub fn iter(&self) -> impl Iterator { + self.tag_map.iter() + } +} + +/// An audit trail stored on-chain. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct OnChainAuditTrail { + /// Unique object ID of the trail. + pub id: UID, + /// Address that created the trail. + pub creator: IotaAddress, + /// Millisecond timestamp at which the trail was created. + pub created_at: u64, + /// Current record sequence number cursor. + pub sequence_number: u64, + /// Linked table containing the trail records. + pub records: LinkedTable, + /// Registry of allowed record tags. + pub tags: TagRegistry, + /// Active locking rules for the trail. + pub locking_config: LockingConfig, + /// Role and capability configuration for the trail. + pub roles: RoleMap, + /// Metadata fixed at creation time. + pub immutable_metadata: Option, + /// Metadata that can be updated after creation. + pub updatable_metadata: Option, + /// On-chain package version of the trail object. + pub version: u64, +} + +/// Metadata set at trail creation and never updated. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ImmutableMetadata { + /// Human-readable trail name. + pub name: String, + /// Optional human-readable description. + pub description: Option, +} + +impl ImmutableMetadata { + /// Creates immutable metadata for a trail. + pub fn new(name: String, description: Option) -> Self { + Self { name, description } + } + + pub(in crate::core) fn tag(package_id: ObjectID) -> TypeTag { + TypeTag::from_str(&format!("{package_id}::main::ImmutableMetadata")) + .expect("invalid TypeTag for ImmutableMetadata") + } + + /// Creates a new `Argument` from the `ImmutableMetadata`. + /// + /// To be used when creating a new `ImmutableMetadata` object on the ledger. 
+ pub(in crate::core) fn to_ptb(&self, ptb: &mut Ptb, package_id: ObjectID) -> Result { + let name = tx::ptb_pure(ptb, "name", &self.name)?; + let description = tx::ptb_pure(ptb, "description", &self.description)?; + + Ok(ptb.programmable_move_call( + package_id, + ident_str!("main").into(), + ident_str!("new_trail_metadata").into(), + vec![], + vec![name, description], + )) + } +} diff --git a/audit-trail-rs/src/core/types/event.rs b/audit-trail-rs/src/core/types/event.rs new file mode 100644 index 00000000..988f43bf --- /dev/null +++ b/audit-trail-rs/src/core/types/event.rs @@ -0,0 +1,254 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashSet; + +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::collection_types::VecSet; +use serde::{Deserialize, Serialize}; +use serde_aux::field_attributes::{deserialize_number_from_string, deserialize_option_number_from_string}; + +use super::{Permission, PermissionSet, RoleTags}; + +/// Generic wrapper for audit trail events. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Event { + /// Parsed event payload. + #[serde(flatten)] + pub data: D, +} + +/// Event emitted when a trail is created. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct AuditTrailCreated { + /// Newly created trail object ID. + pub trail_id: ObjectID, + /// Address that created the trail. + pub creator: IotaAddress, + /// Millisecond event timestamp. + #[serde(deserialize_with = "deserialize_number_from_string")] + pub timestamp: u64, +} + +/// Event emitted when a trail is deleted. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct AuditTrailDeleted { + /// Deleted trail object ID. + pub trail_id: ObjectID, + /// Millisecond event timestamp. + #[serde(deserialize_with = "deserialize_number_from_string")] + pub timestamp: u64, +} + +/// Event emitted when a record is added. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct RecordAdded { + /// Trail object ID receiving the new record. + pub trail_id: ObjectID, + /// Sequence number assigned to the new record. + #[serde(deserialize_with = "deserialize_number_from_string")] + pub sequence_number: u64, + /// Address that added the record. + pub added_by: IotaAddress, + /// Millisecond event timestamp. + #[serde(deserialize_with = "deserialize_number_from_string")] + pub timestamp: u64, +} + +/// Event emitted when a record is deleted. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct RecordDeleted { + /// Trail object ID from which the record was deleted. + pub trail_id: ObjectID, + /// Sequence number of the deleted record. + #[serde(deserialize_with = "deserialize_number_from_string")] + pub sequence_number: u64, + /// Address that deleted the record. + pub deleted_by: IotaAddress, + /// Millisecond event timestamp. + #[serde(deserialize_with = "deserialize_number_from_string")] + pub timestamp: u64, +} + +/// Event emitted when a capability is issued. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct CapabilityIssued { + /// Trail object ID protected by the capability. + pub target_key: ObjectID, + /// Newly created capability object ID. + pub capability_id: ObjectID, + /// Role granted by the capability. + pub role: String, + /// Address receiving the capability, if one is assigned. + pub issued_to: Option, + /// Millisecond timestamp at which the capability becomes valid. + #[serde(deserialize_with = "deserialize_option_number_from_string")] + pub valid_from: Option, + /// Millisecond timestamp at which the capability expires. + #[serde(deserialize_with = "deserialize_option_number_from_string")] + pub valid_until: Option, +} + +/// Event emitted when a capability object is destroyed. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct CapabilityDestroyed { + /// Trail object ID protected by the capability. + pub target_key: ObjectID, + /// Destroyed capability object ID. + pub capability_id: ObjectID, + /// Role granted by the capability. + pub role: String, + /// Address that held the capability, if any. + pub issued_to: Option, + /// Millisecond timestamp at which the capability became valid. + #[serde(deserialize_with = "deserialize_option_number_from_string")] + pub valid_from: Option, + /// Millisecond timestamp at which the capability expired. + #[serde(deserialize_with = "deserialize_option_number_from_string")] + pub valid_until: Option, +} + +/// Event emitted when a capability is revoked. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct CapabilityRevoked { + /// Trail object ID protected by the capability. + pub target_key: ObjectID, + /// Revoked capability object ID. + pub capability_id: ObjectID, + /// Millisecond timestamp retained for denylist cleanup. + #[serde(deserialize_with = "deserialize_number_from_string")] + pub valid_until: u64, +} + +/// Event emitted when a role is created. +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub struct RoleCreated { + /// Trail object ID that owns the role. + pub trail_id: ObjectID, + /// Role name. + pub role: String, + /// Permissions granted by the new role. + pub permissions: PermissionSet, + /// Optional record-tag restrictions stored as role data. + pub data: Option, + /// Address that created the role. + pub created_by: IotaAddress, + /// Millisecond event timestamp. + pub timestamp: u64, +} + +/// Event emitted when a role is updated. +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub struct RoleUpdated { + /// Trail object ID that owns the role. + pub trail_id: ObjectID, + /// Role name. + pub role: String, + /// Updated permissions for the role. 
+ pub permissions: PermissionSet, + /// Updated record-tag restrictions, if any. + pub data: Option, + /// Address that updated the role. + pub updated_by: IotaAddress, + /// Millisecond event timestamp. + pub timestamp: u64, +} + +/// Event emitted when a role is deleted. +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub struct RoleDeleted { + /// Trail object ID that owned the role. + pub trail_id: ObjectID, + /// Role name. + pub role: String, + /// Address that deleted the role. + pub deleted_by: IotaAddress, + /// Millisecond event timestamp. + pub timestamp: u64, +} + +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +pub(crate) struct RawRoleCreated { + target_key: ObjectID, + role: String, + permissions: VecSet, + data: Option, + created_by: IotaAddress, + timestamp: u64, +} + +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +pub(crate) struct RawRoleUpdated { + target_key: ObjectID, + role: String, + new_permissions: VecSet, + new_data: Option, + updated_by: IotaAddress, + timestamp: u64, +} + +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +pub(crate) struct RawRoleDeleted { + target_key: ObjectID, + role: String, + deleted_by: IotaAddress, + timestamp: u64, +} + +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +pub(crate) struct RawRoleTags { + tags: VecSet, +} + +impl From> for PermissionSet { + fn from(value: VecSet) -> Self { + Self { + permissions: value.contents.into_iter().collect::>(), + } + } +} + +impl From for RoleTags { + fn from(value: RawRoleTags) -> Self { + Self { + tags: value.tags.contents.into_iter().collect::>(), + } + } +} + +impl From for RoleCreated { + fn from(value: RawRoleCreated) -> Self { + Self { + trail_id: value.target_key, + role: value.role, + permissions: value.permissions.into(), + data: value.data.map(Into::into), + created_by: value.created_by, + timestamp: value.timestamp, + } + } +} + +impl From for RoleUpdated { + fn from(value: RawRoleUpdated) -> Self { + Self { + trail_id: value.target_key, + 
role: value.role, + permissions: value.new_permissions.into(), + data: value.new_data.map(Into::into), + updated_by: value.updated_by, + timestamp: value.timestamp, + } + } +} + +impl From for RoleDeleted { + fn from(value: RawRoleDeleted) -> Self { + Self { + trail_id: value.target_key, + role: value.role, + deleted_by: value.deleted_by, + timestamp: value.timestamp, + } + } +} diff --git a/audit-trail-rs/src/core/types/locking.rs b/audit-trail-rs/src/core/types/locking.rs new file mode 100644 index 00000000..eb03205a --- /dev/null +++ b/audit-trail-rs/src/core/types/locking.rs @@ -0,0 +1,171 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use iota_interaction::ident_str; +use iota_interaction::types::base_types::ObjectID; +use iota_interaction::types::programmable_transaction_builder::ProgrammableTransactionBuilder as Ptb; +use iota_interaction::types::transaction::Argument; +use serde::{Deserialize, Serialize}; + +use crate::core::internal::tx; +use crate::error::Error; + +/// Locking configuration for the audit trail. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +pub struct LockingConfig { + /// Delete-window policy applied to individual records. + pub delete_record_window: LockingWindow, + /// Time lock that gates deletion of the entire trail. + pub delete_trail_lock: TimeLock, + /// Time lock that gates record writes. + pub write_lock: TimeLock, +} + +impl LockingConfig { + /// Creates a new `Argument` from the `LockingConfig`. + /// + /// To be used when creating or updating locking config on the ledger. 
+ pub(in crate::core) fn to_ptb( + &self, + ptb: &mut Ptb, + package_id: ObjectID, + tf_components_package_id: ObjectID, + ) -> Result { + let delete_record_window = self.delete_record_window.to_ptb(ptb, package_id)?; + let delete_trail_lock = self.delete_trail_lock.to_ptb(ptb, tf_components_package_id)?; + let write_lock = self.write_lock.to_ptb(ptb, tf_components_package_id)?; + + Ok(ptb.programmable_move_call( + package_id, + ident_str!("locking").into(), + ident_str!("new").into(), + vec![], + vec![delete_record_window, delete_trail_lock, write_lock], + )) + } +} + +/// Time-based lock for trail-level operations. +/// +/// Must match `tf_components::timelock::TimeLock` variant order for BCS compatibility. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +pub enum TimeLock { + /// Unlocks at the given Unix timestamp in seconds. + UnlockAt(u32), + /// Unlocks at the given Unix timestamp in milliseconds. + UnlockAtMs(u64), + /// Remains locked until the protected object is explicitly destroyed. + UntilDestroyed, + /// Represents an always-locked state. + Infinite, + /// Disables the time lock. 
+ #[default] + None, +} + +impl TimeLock { + pub(in crate::core) fn to_ptb(&self, ptb: &mut Ptb, package_id: ObjectID) -> Result { + match self { + Self::None => Ok(ptb.programmable_move_call( + package_id, + ident_str!("timelock").into(), + ident_str!("none").into(), + vec![], + vec![], + )), + Self::Infinite => Ok(ptb.programmable_move_call( + package_id, + ident_str!("timelock").into(), + ident_str!("infinite").into(), + vec![], + vec![], + )), + Self::UntilDestroyed => Ok(ptb.programmable_move_call( + package_id, + ident_str!("timelock").into(), + ident_str!("until_destroyed").into(), + vec![], + vec![], + )), + Self::UnlockAt(unix_time) => { + let unix_time = tx::ptb_pure(ptb, "unix_time", *unix_time)?; + let clock = tx::get_clock_ref(ptb); + + Ok(ptb.programmable_move_call( + package_id, + ident_str!("timelock").into(), + ident_str!("unlock_at").into(), + vec![], + vec![unix_time, clock], + )) + } + Self::UnlockAtMs(unix_time_ms) => { + let unix_time_ms = tx::ptb_pure(ptb, "unix_time_ms", *unix_time_ms)?; + let clock = tx::get_clock_ref(ptb); + + Ok(ptb.programmable_move_call( + package_id, + ident_str!("timelock").into(), + ident_str!("unlock_at_ms").into(), + vec![], + vec![unix_time_ms, clock], + )) + } + } + } +} + +/// Defines a locking window (none, time based, or count based). +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +pub enum LockingWindow { + /// No delete window is enforced. + #[default] + None, + /// Records may be deleted only within the given number of seconds since creation. + TimeBased { + /// Window size in seconds. + seconds: u64, + }, + /// Records may be deleted only within the first `count` subsequent records. + CountBased { + /// Number of subsequent records after which deletion is no longer allowed. + count: u64, + }, +} + +impl LockingWindow { + /// Creates a new `Argument` from the `LockingWindow`. + /// + /// To be used when creating or updating locking config on the ledger. 
+ pub(in crate::core) fn to_ptb(&self, ptb: &mut Ptb, package_id: ObjectID) -> Result { + match self { + Self::None => Ok(ptb.programmable_move_call( + package_id, + ident_str!("locking").into(), + ident_str!("window_none").into(), + vec![], + vec![], + )), + Self::TimeBased { seconds } => { + let seconds = tx::ptb_pure(ptb, "seconds", *seconds)?; + Ok(ptb.programmable_move_call( + package_id, + ident_str!("locking").into(), + ident_str!("window_time_based").into(), + vec![], + vec![seconds], + )) + } + Self::CountBased { count } => { + let count = tx::ptb_pure(ptb, "count", *count)?; + Ok(ptb.programmable_move_call( + package_id, + ident_str!("locking").into(), + ident_str!("window_count_based").into(), + vec![], + vec![count], + )) + } + } + } +} diff --git a/audit-trail-rs/src/core/types/mod.rs b/audit-trail-rs/src/core/types/mod.rs new file mode 100644 index 00000000..486299f2 --- /dev/null +++ b/audit-trail-rs/src/core/types/mod.rs @@ -0,0 +1,27 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Shared serializable domain types for audit trails. +//! +//! These types stay close to the on-chain data model so they can deserialize ledger state and events while also +//! serving as the typed inputs and outputs of the Rust client API. + +/// On-chain trail metadata types. +pub mod audit_trail; +/// Event payload types emitted by audit-trail transactions. +pub mod event; +/// Locking configuration types. +pub mod locking; +/// Permission and permission-set types. +pub mod permission; +/// Record payload and pagination types. +pub mod record; +/// Role, capability, and role-tag types. 
+pub mod role_map; + +pub use audit_trail::*; +pub use event::*; +pub use locking::*; +pub use permission::*; +pub use record::*; +pub use role_map::*; diff --git a/audit-trail-rs/src/core/types/permission.rs b/audit-trail-rs/src/core/types/permission.rs new file mode 100644 index 00000000..57a50906 --- /dev/null +++ b/audit-trail-rs/src/core/types/permission.rs @@ -0,0 +1,180 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashSet; +use std::str::FromStr; + +use iota_interaction::ident_str; +use iota_interaction::types::base_types::ObjectID; +use iota_interaction::types::programmable_transaction_builder::ProgrammableTransactionBuilder as Ptb; +use iota_interaction::types::transaction::{Argument, Command}; +use iota_interaction::types::{Identifier, TypeTag}; +use serde::{Deserialize, Serialize}; + +use crate::error::Error; + +/// Audit-trail permission variants mirrored from the Move permission module. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub enum Permission { + /// Allows deleting the entire trail. + DeleteAuditTrail, + /// Allows deleting all records in batch form. + DeleteAllRecords, + /// Allows adding records. + AddRecord, + /// Allows deleting individual records. + DeleteRecord, + /// Allows creating correction records. + CorrectRecord, + /// Allows updating the full locking configuration. + UpdateLockingConfig, + /// Allows updating the delete-record window. + UpdateLockingConfigForDeleteRecord, + /// Allows updating the delete-trail time lock. + UpdateLockingConfigForDeleteTrail, + /// Allows updating the write lock. + UpdateLockingConfigForWrite, + /// Allows creating roles. + AddRoles, + /// Allows updating roles. + UpdateRoles, + /// Allows deleting roles. + DeleteRoles, + /// Allows issuing capabilities. + AddCapabilities, + /// Allows revoking capabilities. + RevokeCapabilities, + /// Allows updating mutable metadata. 
+ UpdateMetadata, + /// Allows deleting mutable metadata. + DeleteMetadata, + /// Allows migrating the trail to a newer package version. + Migrate, + /// Allows adding trail-owned record tags. + AddRecordTags, + /// Allows deleting trail-owned record tags. + DeleteRecordTags, +} + +impl Permission { + /// Returns the Move constructor function name for this permission variant. + pub(crate) fn function_name(&self) -> &'static str { + match self { + Self::DeleteAuditTrail => "delete_audit_trail", + Self::DeleteAllRecords => "delete_all_records", + Self::AddRecord => "add_record", + Self::DeleteRecord => "delete_record", + Self::CorrectRecord => "correct_record", + Self::UpdateLockingConfig => "update_locking_config", + Self::UpdateLockingConfigForDeleteRecord => "update_locking_config_for_delete_record", + Self::UpdateLockingConfigForDeleteTrail => "update_locking_config_for_delete_trail", + Self::UpdateLockingConfigForWrite => "update_locking_config_for_write", + Self::AddRecordTags => "add_record_tags", + Self::DeleteRecordTags => "delete_record_tags", + Self::AddRoles => "add_roles", + Self::UpdateRoles => "update_roles", + Self::DeleteRoles => "delete_roles", + Self::AddCapabilities => "add_capabilities", + Self::RevokeCapabilities => "revoke_capabilities", + Self::UpdateMetadata => "update_metadata", + Self::DeleteMetadata => "delete_metadata", + Self::Migrate => "migrate_audit_trail", + } + } + + pub(crate) fn tag(package_id: ObjectID) -> TypeTag { + TypeTag::from_str(&format!("{package_id}::permission::Permission")).expect("invalid TypeTag for Permission") + } + + pub(in crate::core) fn to_ptb(self, ptb: &mut Ptb, package_id: ObjectID) -> Result { + let function = Identifier::from_str(self.function_name()) + .map_err(|e| Error::InvalidArgument(format!("Failed to create identifier for function: {e}")))?; + + Ok(ptb.programmable_move_call(package_id, ident_str!("permission").into(), function, vec![], vec![])) + } +} + +/// Convenience wrapper around a set of 
[`Permission`] values. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +pub struct PermissionSet { + /// Permissions granted by this set. + pub permissions: HashSet, +} + +impl PermissionSet { + pub(crate) fn to_move_vec(&self, package_id: ObjectID, ptb: &mut Ptb) -> Result { + let permission_type = Permission::tag(package_id); + let permission_args: Vec<_> = self + .permissions + .iter() + .map(|permission| (*permission).to_ptb(ptb, package_id)) + .collect::, _>>()?; + + Ok(ptb.command(Command::MakeMoveVec(Some(permission_type.into()), permission_args))) + } + /// Returns the recommended role-administration permissions. + pub fn admin_permissions() -> Self { + Self { + permissions: HashSet::from([ + Permission::AddCapabilities, + Permission::RevokeCapabilities, + Permission::AddRecordTags, + Permission::DeleteRecordTags, + Permission::AddRoles, + Permission::UpdateRoles, + Permission::DeleteRoles, + ]), + } + } + + /// Returns the permissions needed to administer records. + pub fn record_admin_permissions() -> Self { + Self { + permissions: HashSet::from([ + Permission::AddRecord, + Permission::DeleteRecord, + Permission::CorrectRecord, + ]), + } + } + + /// Returns the permissions needed to administer locking rules. + pub fn locking_admin_permissions() -> Self { + Self { + permissions: HashSet::from([ + Permission::UpdateLockingConfig, + Permission::UpdateLockingConfigForDeleteTrail, + Permission::UpdateLockingConfigForDeleteRecord, + Permission::UpdateLockingConfigForWrite, + ]), + } + } + + /// Returns the permissions needed to administer roles. + pub fn role_admin_permissions() -> Self { + Self { + permissions: HashSet::from([Permission::AddRoles, Permission::UpdateRoles, Permission::DeleteRoles]), + } + } + + /// Returns the permissions needed to administer record tags. 
+ pub fn tag_admin_permissions() -> Self { + Self { + permissions: HashSet::from([Permission::AddRecordTags, Permission::DeleteRecordTags]), + } + } + + /// Returns the permissions needed to issue and revoke capabilities. + pub fn cap_admin_permissions() -> Self { + Self { + permissions: HashSet::from_iter(vec![Permission::AddCapabilities, Permission::RevokeCapabilities]), + } + } + + /// Returns the permissions needed to administer mutable metadata. + pub fn metadata_admin_permissions() -> Self { + Self { + permissions: HashSet::from_iter(vec![Permission::UpdateMetadata, Permission::DeleteMetadata]), + } + } +} diff --git a/audit-trail-rs/src/core/types/record.rs b/audit-trail-rs/src/core/types/record.rs new file mode 100644 index 00000000..e18e1ff2 --- /dev/null +++ b/audit-trail-rs/src/core/types/record.rs @@ -0,0 +1,297 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::{BTreeMap, HashSet}; +use std::str::FromStr; + +use iota_interaction::ident_str; +use iota_interaction::types::TypeTag; +use iota_interaction::types::base_types::{IotaAddress, ObjectID}; +use iota_interaction::types::programmable_transaction_builder::ProgrammableTransactionBuilder as Ptb; +use iota_interaction::types::transaction::Argument; +use serde::{Deserialize, Serialize}; + +use crate::core::internal::tx; +use crate::error::Error; + +/// Page of records loaded through linked-table traversal. +#[derive(Debug, Clone)] +pub struct PaginatedRecord { + /// Records included in the current page, keyed by sequence number. + pub records: BTreeMap>, + /// Cursor to pass to the next [`TrailRecords::list_page`](crate::core::records::TrailRecords::list_page) call. + pub next_cursor: Option, + /// Indicates whether another page may be available. + pub has_next_page: bool, +} + +/// A single record in the audit trail. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Record { + /// Record payload stored on-chain. 
+ pub data: D, + /// Optional application-defined metadata. + pub metadata: Option, + /// Optional trail-owned tag attached to the record. + pub tag: Option, + /// Monotonic record sequence number inside the trail. + pub sequence_number: u64, + /// Address that added the record. + pub added_by: IotaAddress, + /// Millisecond timestamp at which the record was added. + pub added_at: u64, + /// Correction relationships for this record. + pub correction: RecordCorrection, +} + +/// Input used when creating a trail with an initial record. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct InitialRecord { + /// Initial payload to store in the trail. + pub data: D, + /// Optional application-defined metadata. + pub metadata: Option, + /// Optional initial tag from the trail-owned registry. + pub tag: Option, +} + +impl InitialRecord { + /// Creates a new initial record. + /// + /// # Examples + /// + /// ```rust + /// use audit_trail::core::types::{Data, InitialRecord}; + /// + /// let record = InitialRecord::new( + /// Data::text("hello"), + /// Some("seed".to_string()), + /// Some("inbox".to_string()), + /// ); + /// + /// assert_eq!(record.data, Data::text("hello")); + /// assert_eq!(record.metadata.as_deref(), Some("seed")); + /// assert_eq!(record.tag.as_deref(), Some("inbox")); + /// ``` + pub fn new(data: impl Into, metadata: Option, tag: Option) -> Self { + Self { + data: data.into(), + metadata, + tag, + } + } + + pub(crate) fn tag(package_id: ObjectID) -> TypeTag { + TypeTag::from_str(&format!( + "{package_id}::record::InitialRecord<{}>", + Data::tag(package_id) + )) + .expect("invalid TypeTag for InitialRecord") + } + + pub(in crate::core) fn into_ptb(self, ptb: &mut Ptb, package_id: ObjectID) -> Result { + let data_tag = Data::tag(package_id); + let data = self.data.into_ptb(ptb, package_id)?; + let metadata = tx::ptb_pure(ptb, "initial_record_metadata", self.metadata)?; + let tag = tx::ptb_pure(ptb, "initial_record_tag", self.tag)?; + 
+ Ok(ptb.programmable_move_call( + package_id, + ident_str!("record").into(), + ident_str!("new_initial_record").into(), + vec![data_tag], + vec![data, metadata, tag], + )) + } +} + +/// Bidirectional correction tracking for audit records. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +pub struct RecordCorrection { + /// Sequence numbers that this record supersedes. + pub replaces: HashSet, + /// Sequence number of the record that supersedes this one, if any. + pub is_replaced_by: Option, +} + +impl RecordCorrection { + /// Creates a correction value that replaces the given sequence numbers. + pub fn with_replaces(replaces: HashSet) -> Self { + Self { + replaces, + is_replaced_by: None, + } + } + + /// Returns `true` when this record supersedes at least one earlier record. + /// + /// # Examples + /// + /// ```rust + /// use std::collections::HashSet; + /// + /// use audit_trail::core::types::RecordCorrection; + /// + /// let correction = RecordCorrection::with_replaces(HashSet::from([1, 2])); + /// + /// assert!(correction.is_correction()); + /// ``` + pub fn is_correction(&self) -> bool { + !self.replaces.is_empty() + } + + /// Returns `true` when this record has itself been replaced by a later record. + pub fn is_replaced(&self) -> bool { + self.is_replaced_by.is_some() + } +} + +/// Supported record data types. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum Data { + /// Arbitrary binary payload. + Bytes(Vec), + /// UTF-8 text payload. + Text(String), +} + +impl Data { + /// Returns the Move type tag for `record::Data`. + pub(crate) fn tag(package_id: ObjectID) -> TypeTag { + TypeTag::from_str(&format!("{package_id}::record::Data")).expect("should be valid type tag") + } + + /// Creates a PTB argument for `record::Data`. 
+    pub(in crate::core) fn into_ptb(self, ptb: &mut Ptb, package_id: ObjectID) -> Result<Argument, Error> {
+        match self {
+            Data::Bytes(bytes) => {
+                let bytes = tx::ptb_pure(ptb, "data_bytes", bytes)?;
+                Ok(ptb.programmable_move_call(
+                    package_id,
+                    ident_str!("record").into(),
+                    ident_str!("new_bytes").into(),
+                    vec![],
+                    vec![bytes],
+                ))
+            }
+            Data::Text(text) => {
+                let text = tx::ptb_pure(ptb, "data_text", text)?;
+                Ok(ptb.programmable_move_call(
+                    package_id,
+                    ident_str!("record").into(),
+                    ident_str!("new_text").into(),
+                    vec![],
+                    vec![text],
+                ))
+            }
+        }
+    }
+
+    /// Validates that the on-chain trail stores `record::Data`.
+    pub(in crate::core) fn ensure_matches_tag(&self, expected: &TypeTag, package_id: ObjectID) -> Result<(), Error> {
+        let actual = Self::tag(package_id);
+
+        if &actual == expected {
+            Ok(())
+        } else {
+            Err(Error::InvalidArgument(format!(
+                "record data type mismatch: trail expects {:?}, SDK writes {:?}",
+                expected, actual
+            )))
+        }
+    }
+
+    /// Creates a new `Data` from bytes.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use audit_trail::core::types::Data;
+    ///
+    /// assert_eq!(Data::bytes([1_u8, 2, 3]), Data::Bytes(vec![1, 2, 3]));
+    /// ```
+    pub fn bytes(data: impl Into<Vec<u8>>) -> Self {
+        Self::Bytes(data.into())
+    }
+
+    /// Creates a new `Data` from text.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use audit_trail::core::types::Data;
+    ///
+    /// assert_eq!(Data::text("hello"), Data::Text("hello".to_string()));
+    /// ```
+    pub fn text(data: impl Into<String>) -> Self {
+        Self::Text(data.into())
+    }
+
+    /// Extracts the data as bytes.
+    ///
+    /// ## Errors
+    ///
+    /// Returns an error if the data is text rather than bytes.
+    pub fn as_bytes(self) -> Result<Vec<u8>, Error> {
+        match self {
+            Data::Bytes(data) => Ok(data),
+            Data::Text(_) => Err(Error::GenericError("Data is not bytes".to_string())),
+        }
+    }
+
+    /// Extracts the data as text.
+    ///
+    /// ## Errors
+    ///
+    /// Returns an error if the data is bytes rather than text.
+    pub fn as_text(self) -> Result<String, Error> {
+        match self {
+            Data::Bytes(_) => Err(Error::GenericError("Data is not text".to_string())),
+            Data::Text(data) => Ok(data),
+        }
+    }
+}
+
+impl From<String> for Data {
+    fn from(value: String) -> Self {
+        Data::Text(value)
+    }
+}
+
+impl From<&str> for Data {
+    fn from(value: &str) -> Self {
+        Data::Text(value.to_string())
+    }
+}
+
+impl From<Vec<u8>> for Data {
+    fn from(value: Vec<u8>) -> Self {
+        Data::Bytes(value)
+    }
+}
+
+impl From<&[u8]> for Data {
+    fn from(value: &[u8]) -> Self {
+        Data::Bytes(value.to_vec())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Data;
+
+    #[test]
+    fn data_bcs_roundtrip_preserves_text_variant() {
+        let encoded = bcs::to_bytes(&Data::Text("hello world".to_string())).expect("failed to encode Data");
+        let data = bcs::from_bytes::<Data>(&encoded).expect("failed to decode Data");
+        assert_eq!(data, Data::Text("hello world".to_string()));
+    }
+
+    #[test]
+    fn data_bcs_roundtrip_preserves_bytes_variant() {
+        let encoded =
+            bcs::to_bytes(&Data::Bytes(vec![0x47, 0x49, 0x46, 0x38, 0x39, 0x61])).expect("failed to encode Data");
+        let data = bcs::from_bytes::<Data>(&encoded).expect("failed to decode Data");
+        assert_eq!(data, Data::Bytes(vec![0x47, 0x49, 0x46, 0x38, 0x39, 0x61]));
+    }
+}
diff --git a/audit-trail-rs/src/core/types/role_map.rs b/audit-trail-rs/src/core/types/role_map.rs
new file mode 100644
index 00000000..29e45c41
--- /dev/null
+++ b/audit-trail-rs/src/core/types/role_map.rs
@@ -0,0 +1,224 @@
+// Copyright 2020-2026 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+use std::collections::{HashMap, HashSet};
+use std::str::FromStr;
+
+use iota_interaction::types::TypeTag;
+use iota_interaction::types::base_types::{IotaAddress, ObjectID};
+use iota_interaction::types::collection_types::LinkedTable;
+use iota_interaction::types::id::UID;
+use iota_interaction::types::programmable_transaction_builder::ProgrammableTransactionBuilder as Ptb;
+use iota_interaction::types::transaction::Argument;
+use iota_interaction::{MoveType, ident_str};
+use serde::{Deserialize, Serialize};
+use serde_aux::field_attributes::deserialize_option_number_from_string;
+
+use super::permission::Permission;
+use crate::core::internal::move_collections::{deserialize_vec_map, deserialize_vec_set};
+use crate::core::internal::tx;
+use crate::error::Error;
+
+/// Role and capability configuration stored on a trail.
+///
+/// This mirrors the access-control state maintained by the Move package, including the reserved initial-admin
+/// role, the revoked-capability denylist, and the role data used for tag-aware authorization.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct RoleMap {
+    /// Trail object ID that this role map protects.
+    pub target_key: ObjectID,
+    /// Role definitions keyed by role name.
+    #[serde(deserialize_with = "deserialize_vec_map")]
+    pub roles: HashMap<String, Role>,
+    /// Reserved role name used for initial-admin capabilities.
+    pub initial_admin_role_name: String,
+    /// Denylist of revoked capability IDs.
+    pub revoked_capabilities: LinkedTable<ObjectID>,
+    /// Capability IDs currently recognized as initial-admin capabilities.
+    #[serde(deserialize_with = "deserialize_vec_set")]
+    pub initial_admin_cap_ids: HashSet<ObjectID>,
+    /// Permissions required to administer roles.
+    pub role_admin_permissions: RoleAdminPermissions,
+    /// Permissions required to administer capabilities.
+    pub capability_admin_permissions: CapabilityAdminPermissions,
+}
+
+/// Role definition stored in the trail role map.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Role {
+    /// Permissions granted by the role.
+    #[serde(deserialize_with = "deserialize_vec_set")]
+    pub permissions: HashSet<Permission>,
+    /// Optional role-scoped record-tag restrictions.
+    pub data: Option<RoleTags>,
+}
+
+/// Permissions required to administer roles in the trail's access-control state.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct RoleAdminPermissions {
+    /// Permission required to create roles.
+    pub add: Permission,
+    /// Permission required to delete roles.
+    pub delete: Permission,
+    /// Permission required to update roles.
+    pub update: Permission,
+}
+
+/// Permissions required to administer capabilities in the trail's access-control state.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct CapabilityAdminPermissions {
+    /// Permission required to issue capabilities.
+    pub add: Permission,
+    /// Permission required to revoke capabilities.
+    pub revoke: Permission,
+}
+
+/// Capability issuance options used by the role-based API.
+///
+/// These fields only configure restrictions on the issued capability object. Matching against the current
+/// caller and timestamp happens when the capability is later used.
+#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
+pub struct CapabilityIssueOptions {
+    /// Address that should own the capability, if any.
+    pub issued_to: Option<IotaAddress>,
+    /// Millisecond timestamp at which the capability becomes valid.
+    pub valid_from_ms: Option<u64>,
+    /// Millisecond timestamp at which the capability expires.
+    pub valid_until_ms: Option<u64>,
+}
+
+/// Allowlisted record tags stored as role data.
+///
+/// The Rust name `RoleTags` mirrors the Move type `record_tags::RoleTags` it deserializes from.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
+pub struct RoleTags {
+    /// Allowlisted record tags for the role.
+    #[serde(deserialize_with = "deserialize_vec_set")]
+    pub tags: HashSet<String>,
+}
+
+impl RoleTags {
+    /// Creates role-tag restrictions from an iterator of tag names.
+    ///
+    /// The set is deduplicated, and PTB encoding later sorts the tags for deterministic serialization.
+    pub fn new<I, S>(tags: I) -> Self
+    where
+        I: IntoIterator<Item = S>,
+        S: Into<String>,
+    {
+        Self {
+            tags: tags.into_iter().map(Into::into).collect(),
+        }
+    }
+
+    /// Returns `true` when the given tag is allowed for the role.
+    pub fn allows(&self, tag: &str) -> bool {
+        self.tags.contains(tag)
+    }
+
+    pub(crate) fn tag(package_id: ObjectID) -> TypeTag {
+        TypeTag::from_str(&format!("{package_id}::record_tags::RoleTags")).expect("invalid TypeTag for RoleTags")
+    }
+
+    pub(in crate::core) fn to_ptb(&self, ptb: &mut Ptb, package_id: ObjectID) -> Result<Argument, Error> {
+        // Sort for deterministic BCS encoding of the tag list.
+        let mut tags = self.tags.iter().cloned().collect::<Vec<_>>();
+        tags.sort();
+        let tags_arg = tx::ptb_pure(ptb, "tags", tags)?;
+
+        Ok(ptb.programmable_move_call(
+            package_id,
+            ident_str!("record_tags").into(),
+            ident_str!("new_role_tags").into(),
+            vec![],
+            vec![tags_arg],
+        ))
+    }
+}
+
+/// Capability data returned by the Move capability module.
+///
+/// A capability grants exactly one role against exactly one trail and may additionally restrict who may use it
+/// and during which time window it is valid.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Capability {
+    /// Capability object ID.
+    pub id: UID,
+    /// Trail object ID protected by the capability.
+    pub target_key: ObjectID,
+    /// Role granted by the capability.
+    pub role: String,
+    /// Capability holder, if the capability is assigned to an address.
+    pub issued_to: Option<IotaAddress>,
+    /// Millisecond timestamp at which the capability becomes valid.
+    #[serde(deserialize_with = "deserialize_option_number_from_string")]
+    pub valid_from: Option<u64>,
+    /// Millisecond timestamp at which the capability expires.
+ #[serde(deserialize_with = "deserialize_option_number_from_string")] + pub valid_until: Option, +} + +impl Capability { + pub(crate) fn type_tag(package_id: ObjectID) -> TypeTag { + TypeTag::from_str(format!("{package_id}::capability::Capability").as_str()).expect("failed to create type tag") + } + + pub(crate) fn matches_target_and_role(&self, trail_id: ObjectID, valid_roles: &HashSet) -> bool { + self.target_key == trail_id && valid_roles.contains(&self.role) + } +} + +impl MoveType for Capability { + fn move_type(package: ObjectID) -> TypeTag { + Self::type_tag(package) + } +} + +#[cfg(test)] +mod tests { + use iota_interaction::types::base_types::{IotaAddress, dbg_object_id}; + use iota_interaction::types::id::UID; + use serde_json::json; + + use super::Capability; + + #[test] + fn capability_deserializes_string_encoded_time_constraints() { + let issued_to = IotaAddress::random_for_testing_only(); + let capability = Capability { + id: UID::new(dbg_object_id(1)), + target_key: dbg_object_id(2), + role: "Writer".to_string(), + issued_to: Some(issued_to), + valid_from: None, + valid_until: None, + }; + + let mut value = serde_json::to_value(capability).expect("capability serializes"); + value["valid_from"] = json!("1700000000000"); + value["valid_until"] = json!("1700000005000"); + + let decoded: Capability = serde_json::from_value(value).expect("capability deserializes"); + + assert_eq!(decoded.valid_from, Some(1_700_000_000_000)); + assert_eq!(decoded.valid_until, Some(1_700_000_005_000)); + assert_eq!(decoded.issued_to, Some(issued_to)); + } + + #[test] + fn capability_deserializes_absent_time_constraints() { + let capability = Capability { + id: UID::new(dbg_object_id(4)), + target_key: dbg_object_id(5), + role: "Writer".to_string(), + issued_to: None, + valid_from: None, + valid_until: None, + }; + + let value = serde_json::to_value(capability).expect("capability serializes"); + let decoded: Capability = serde_json::from_value(value).expect("capability 
deserializes"); + + assert_eq!(decoded.valid_from, None); + assert_eq!(decoded.valid_until, None); + } +} diff --git a/audit-trail-rs/src/error.rs b/audit-trail-rs/src/error.rs new file mode 100644 index 00000000..af81958a --- /dev/null +++ b/audit-trail-rs/src/error.rs @@ -0,0 +1,50 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Error types returned by the audit-trail public API. + +use crate::iota_interaction_adapter::AdapterError; + +/// Errors that can occur when reading or mutating audit trails. +#[derive(Debug, thiserror::Error, strum::IntoStaticStr)] +#[non_exhaustive] +pub enum Error { + /// Returned when a signer key or public key cannot be derived or validated. + #[error("invalid key: {0}")] + InvalidKey(String), + /// Returned when client configuration or package-ID configuration is invalid. + #[error("invalid config: {0}")] + InvalidConfig(String), + /// Returned when an RPC request fails. + #[error("RPC error: {0}")] + RpcError(String), + /// Error returned by the underlying IOTA client adapter. + #[error("IOTA client error: {0}")] + IotaClient(#[from] AdapterError), + /// Generic catch-all error for crate-specific failures that do not fit a narrower variant. + #[error("{0}")] + GenericError(String), + /// Placeholder for unimplemented API surface. + #[error("not implemented: {0}")] + NotImplemented(&'static str), + /// Returned when a Move tag cannot be parsed. + #[error("Failed to parse tag: {0}")] + FailedToParseTag(String), + /// Returned when an argument is semantically invalid. + #[error("Invalid argument: {0}")] + InvalidArgument(String), + /// The response from the IOTA node API was not in the expected format. + #[error("unexpected API response: {0}")] + UnexpectedApiResponse(String), + /// Failed to deserialize data using BCS. + #[error("BCS deserialization error: {0}")] + DeserializationError(#[from] bcs::Error), + /// The transaction response from the IOTA node API was not in the expected format. 
+ #[error("unexpected transaction response: {0}")] + TransactionUnexpectedResponse(String), +} + +#[cfg(target_arch = "wasm32")] +use product_common::impl_wasm_error_from; +#[cfg(target_arch = "wasm32")] +impl_wasm_error_from!(Error); diff --git a/audit-trail-rs/src/iota_interaction_adapter.rs b/audit-trail-rs/src/iota_interaction_adapter.rs new file mode 100644 index 00000000..c2db171b --- /dev/null +++ b/audit-trail-rs/src/iota_interaction_adapter.rs @@ -0,0 +1,12 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Platform-dependent adapter re-exports for the underlying IOTA interaction layer. +//! +//! This keeps the rest of the crate generic over native and wasm targets by exposing the same +//! adapter names from either `iota_interaction_rust` or `iota_interaction_ts`. + +#[cfg(not(target_arch = "wasm32"))] +pub(crate) use iota_interaction_rust::*; +#[cfg(target_arch = "wasm32")] +pub(crate) use iota_interaction_ts::*; diff --git a/audit-trail-rs/src/lib.rs b/audit-trail-rs/src/lib.rs new file mode 100644 index 00000000..82f6f73e --- /dev/null +++ b/audit-trail-rs/src/lib.rs @@ -0,0 +1,22 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +#![doc = include_str!("../README.md")] +#![warn(missing_docs, rustdoc::all)] + +/// Client wrappers for read-only and signing access to audit trails. +pub mod client; +/// Core handles, builders, transactions, and domain types. +pub mod core; +/// Error types returned by the public API. +pub mod error; +pub(crate) mod iota_interaction_adapter; +pub(crate) mod package; + +/// A signing audit-trail client that can build write transactions. +pub use client::full_client::AuditTrailClient; +/// Read-only client types and package override configuration. +pub use client::read_only::{AuditTrailClientReadOnly, PackageOverrides}; +/// HTTP utilities to implement the trait [HttpClient](product_common::http_client::HttpClient). 
+#[cfg(feature = "gas-station")] +pub use product_common::http_client; diff --git a/audit-trail-rs/src/package.rs b/audit-trail-rs/src/package.rs new file mode 100644 index 00000000..0d443bb7 --- /dev/null +++ b/audit-trail-rs/src/package.rs @@ -0,0 +1,137 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Package management for audit trail smart contracts. +//! +//! This module handles package ID resolution and registry management +//! for the audit trail Move contracts. + +#![allow(dead_code)] + +use std::sync::LazyLock; + +use iota_interaction::types::base_types::ObjectID; +use product_common::network_name::NetworkName; +use product_common::package_registry::{Env, PackageRegistry}; +use product_common::tf_components_registry; +use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockError}; + +use crate::client::PackageOverrides; +use crate::error::Error; + +type PackageRegistryLock = RwLockReadGuard<'static, PackageRegistry>; +type PackageRegistryLockMut = RwLockWriteGuard<'static, PackageRegistry>; + +/// Global registry for audit trail package information. +static AUDIT_TRAIL_PACKAGE_REGISTRY: LazyLock> = LazyLock::new(|| { + let package_history_json = include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/../audit-trail-move/Move.history.json" + )); + RwLock::new( + PackageRegistry::from_package_history_json_str(package_history_json) + .expect("Move.history.json exists and it's valid"), + ) +}); + +/// Runtime overrides for TfComponents package information. +static TF_COMPONENTS_OVERRIDE_REGISTRY: LazyLock> = + LazyLock::new(|| RwLock::new(PackageRegistry::default())); + +/// Returns a read lock to the package registry. +pub(crate) async fn audit_trail_package_registry() -> PackageRegistryLock { + AUDIT_TRAIL_PACKAGE_REGISTRY.read().await +} + +/// Attempts to acquire a read lock without blocking. 
+pub(crate) fn try_audit_trail_package_registry() -> Result { + AUDIT_TRAIL_PACKAGE_REGISTRY.try_read() +} + +/// Returns a blocking read lock to the package registry. +pub(crate) fn blocking_audit_trail_registry() -> PackageRegistryLock { + AUDIT_TRAIL_PACKAGE_REGISTRY.blocking_read() +} + +/// Returns a write lock to the package registry. +pub(crate) async fn audit_trail_package_registry_mut() -> PackageRegistryLockMut { + AUDIT_TRAIL_PACKAGE_REGISTRY.write().await +} + +/// Attempts to acquire a write lock without blocking. +pub(crate) fn try_audit_trail_package_registry_mut() -> Result { + AUDIT_TRAIL_PACKAGE_REGISTRY.try_write() +} + +/// Returns a blocking write lock to the package registry. +pub(crate) fn blocking_audit_trail_registry_mut() -> PackageRegistryLockMut { + AUDIT_TRAIL_PACKAGE_REGISTRY.blocking_write() +} + +pub(crate) async fn tf_components_override_registry_mut() -> PackageRegistryLockMut { + TF_COMPONENTS_OVERRIDE_REGISTRY.write().await +} + +#[derive(Debug, Clone, Copy)] +pub(crate) struct ResolvedPackageIds { + pub audit_trail_package_id: ObjectID, + pub tf_components_package_id: ObjectID, +} + +pub(crate) async fn resolve_package_ids( + network: &NetworkName, + package_overrides: &PackageOverrides, +) -> Result<(NetworkName, ResolvedPackageIds), Error> { + let chain_id = network.as_ref().to_string(); + let package_registry = audit_trail_package_registry().await; + let audit_trail_package_id = package_overrides + .audit_trail + .or_else(|| package_registry.package_id(network)) + .ok_or_else(|| { + Error::InvalidConfig(format!( + "no information for a published `audit_trail` package on network {network}; try to use `AuditTrailClientReadOnly::new_with_package_overrides`" + )) + })?; + let resolved_network = match chain_id.as_str() { + product_common::package_registry::MAINNET_CHAIN_ID => { + NetworkName::try_from("iota").expect("valid network name") + } + _ => package_registry + .chain_alias(&chain_id) + .and_then(|alias| 
NetworkName::try_from(alias).ok()) + .unwrap_or_else(|| network.clone()), + }; + + drop(package_registry); + + let env = Env::new_with_alias(chain_id.clone(), resolved_network.as_ref()); + if let Some(audit_trail_package_id) = package_overrides.audit_trail { + audit_trail_package_registry_mut() + .await + .insert_env_history(env.clone(), vec![audit_trail_package_id]); + } + if let Some(tf_components_package_id) = package_overrides.tf_component { + tf_components_override_registry_mut() + .await + .insert_env_history(env, vec![tf_components_package_id]); + } + + let tf_components_package_id = resolve_tf_components_package_id(resolved_network.as_ref()).await.ok_or_else(|| { + Error::InvalidConfig(format!( + "no information for a published `TfComponents` package on network {network}; try to use `AuditTrailClientReadOnly::new_with_package_overrides`" + )) + })?; + + Ok(( + resolved_network, + ResolvedPackageIds { + audit_trail_package_id, + tf_components_package_id, + }, + )) +} + +pub(crate) async fn resolve_tf_components_package_id(network: &str) -> Option { + let override_package_id = TF_COMPONENTS_OVERRIDE_REGISTRY.read().await.package_id(network); + override_package_id.or_else(|| tf_components_registry::tf_components_package_id(network)) +} diff --git a/audit-trail-rs/tests/e2e/access.rs b/audit-trail-rs/tests/e2e/access.rs new file mode 100644 index 00000000..94b4e2d0 --- /dev/null +++ b/audit-trail-rs/tests/e2e/access.rs @@ -0,0 +1,577 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashSet; + +use audit_trail::core::types::{CapabilityIssueOptions, Data, Permission, PermissionSet, RoleTags}; +use iota_interaction::types::base_types::IotaAddress; +use product_common::core_client::CoreClient; + +use crate::client::get_funded_test_client; + +#[tokio::test] +async fn create_role_then_issue_capability_default_options() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = 
client.create_test_trail(Data::text("access-e2e")).await?; + let role_name = "auditor"; + + client + .create_role(trail_id, role_name, vec![Permission::AddRecord], None) + .await?; + + let issued = client + .issue_cap(trail_id, role_name, CapabilityIssueOptions::default()) + .await?; + + assert_eq!(issued.target_key, trail_id); + assert_eq!(issued.role, role_name.to_string()); + assert_eq!(issued.issued_to, None); + assert_eq!(issued.valid_from, None); + assert_eq!(issued.valid_until, None); + + Ok(()) +} + +#[tokio::test] +async fn update_role_permissions_then_issue_capability() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("access-e2e")).await?; + let access = client.trail(trail_id).access(); + let role_name = "editor"; + + client + .create_role(trail_id, role_name, vec![Permission::AddRecord], None) + .await?; + + let updated = access + .for_role(role_name) + .update_permissions( + PermissionSet { + permissions: HashSet::from([Permission::AddRecord, Permission::DeleteRecord]), + }, + None, + ) + .build_and_execute(&client) + .await? 
+ .output; + assert_eq!(updated.trail_id, trail_id); + assert_eq!(updated.role, role_name.to_string()); + assert_eq!( + updated.permissions.permissions, + HashSet::from([Permission::AddRecord, Permission::DeleteRecord]) + ); + assert_eq!(updated.data, None); + assert_eq!(updated.updated_by, client.sender_address()); + assert!(updated.timestamp > 0); + + let issued = client + .issue_cap(trail_id, role_name, CapabilityIssueOptions::default()) + .await?; + assert_eq!(issued.target_key, trail_id); + assert_eq!(issued.role, role_name.to_string()); + + Ok(()) +} + +#[tokio::test] +async fn delegated_role_and_capability_admins_can_enable_record_writes() -> anyhow::Result<()> { + let admin = get_funded_test_client().await?; + let role_admin = get_funded_test_client().await?; + let cap_admin = get_funded_test_client().await?; + let record_admin = get_funded_test_client().await?; + let trail_id = admin.create_test_trail(Data::text("delegated-access-flow")).await?; + + admin + .create_role( + trail_id, + "RoleAdmin", + PermissionSet::role_admin_permissions().permissions, + None, + ) + .await?; + admin + .create_role( + trail_id, + "CapAdmin", + PermissionSet::cap_admin_permissions().permissions, + None, + ) + .await?; + admin + .issue_cap( + trail_id, + "RoleAdmin", + CapabilityIssueOptions { + issued_to: Some(role_admin.sender_address()), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + admin + .issue_cap( + trail_id, + "CapAdmin", + CapabilityIssueOptions { + issued_to: Some(cap_admin.sender_address()), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + role_admin + .create_role( + trail_id, + "RecordAdmin", + PermissionSet::record_admin_permissions().permissions, + None, + ) + .await?; + cap_admin + .issue_cap( + trail_id, + "RecordAdmin", + CapabilityIssueOptions { + issued_to: Some(record_admin.sender_address()), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + let added = record_admin + .trail(trail_id) + .records() + 
.add(Data::text("delegated write"), None, None) + .build_and_execute(&record_admin) + .await? + .output; + + assert_eq!(added.trail_id, trail_id); + assert_eq!(added.sequence_number, 1); + + let record = admin.trail(trail_id).records().get(1).await?; + assert_eq!(record.sequence_number, 1); + assert_eq!(record.added_by, record_admin.sender_address()); + assert_eq!(record.data, Data::text("delegated write")); + + Ok(()) +} + +#[tokio::test] +async fn create_role_rejects_undefined_role_tags() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail_with_tags(Data::text("roles-undefined-create"), ["legal"]) + .await?; + + let created = client + .create_role( + trail_id, + "tagged-writer", + vec![Permission::AddRecord], + Some(RoleTags::new(["finance"])), + ) + .await; + + assert!( + created.is_err(), + "creating a role with tags outside the trail registry must fail" + ); + + Ok(()) +} + +#[tokio::test] +async fn update_role_permissions_rejects_undefined_role_tags() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail_with_tags(Data::text("roles-undefined-update"), ["legal"]) + .await?; + let access = client.trail(trail_id).access(); + let role_name = "editor"; + + client + .create_role(trail_id, role_name, vec![Permission::AddRecord], None) + .await?; + + let updated = access + .for_role(role_name) + .update_permissions( + PermissionSet { + permissions: HashSet::from([Permission::AddRecord]), + }, + Some(RoleTags::new(["finance"])), + ) + .build_and_execute(&client) + .await; + + assert!( + updated.is_err(), + "updating a role with tags outside the trail registry must fail" + ); + + Ok(()) +} + +#[tokio::test] +async fn issue_capability_for_nonexistent_role_fails() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("missing-role-cap")).await?; + + let issued = client + 
.trail(trail_id) + .access() + .for_role("NonExistentRole") + .issue_capability(CapabilityIssueOptions::default()) + .build_and_execute(&client) + .await; + + assert!(issued.is_err(), "issuing a capability for a missing role must fail"); + + Ok(()) +} + +#[tokio::test] +async fn issue_capability_requires_add_capabilities_permission() -> anyhow::Result<()> { + let admin = get_funded_test_client().await?; + let operator = get_funded_test_client().await?; + let trail_id = admin.create_test_trail(Data::text("missing-cap-permission")).await?; + + admin + .create_role(trail_id, "NoCapPerm", vec![Permission::AddRecord], None) + .await?; + admin + .create_role( + trail_id, + "RecordAdmin", + PermissionSet::record_admin_permissions().permissions, + None, + ) + .await?; + admin + .issue_cap( + trail_id, + "NoCapPerm", + CapabilityIssueOptions { + issued_to: Some(operator.sender_address()), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + let issued = operator + .trail(trail_id) + .access() + .for_role("RecordAdmin") + .issue_capability(CapabilityIssueOptions::default()) + .build_and_execute(&operator) + .await; + + assert!( + issued.is_err(), + "issuing a capability without AddCapabilities permission must fail" + ); + + Ok(()) +} + +#[tokio::test] +async fn revoke_capability_requires_revoke_capabilities_permission() -> anyhow::Result<()> { + let admin = get_funded_test_client().await?; + let no_revoke = get_funded_test_client().await?; + let target = get_funded_test_client().await?; + let trail_id = admin.create_test_trail(Data::text("missing-revoke-permission")).await?; + + admin + .create_role(trail_id, "NoRevokePerm", vec![Permission::AddRecord], None) + .await?; + admin + .create_role( + trail_id, + "RecordAdmin", + PermissionSet::record_admin_permissions().permissions, + None, + ) + .await?; + admin + .issue_cap( + trail_id, + "NoRevokePerm", + CapabilityIssueOptions { + issued_to: Some(no_revoke.sender_address()), + ..CapabilityIssueOptions::default() + }, 
+ ) + .await?; + let target_cap = admin + .issue_cap( + trail_id, + "RecordAdmin", + CapabilityIssueOptions { + issued_to: Some(target.sender_address()), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + let revoked = no_revoke + .trail(trail_id) + .access() + .revoke_capability(target_cap.capability_id, target_cap.valid_until) + .build_and_execute(&no_revoke) + .await; + + assert!( + revoked.is_err(), + "revoking a capability without RevokeCapabilities permission must fail" + ); + + Ok(()) +} + +#[tokio::test] +async fn delete_role_prevents_new_capability_issuance() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("access-e2e")).await?; + let access = client.trail(trail_id).access(); + let role_name = "to-delete"; + + client + .create_role(trail_id, role_name, vec![Permission::AddRecord], None) + .await?; + let deleted = access + .for_role(role_name) + .delete() + .build_and_execute(&client) + .await? 
+ .output; + assert_eq!(deleted.trail_id, trail_id); + assert_eq!(deleted.role, role_name.to_string()); + assert_eq!(deleted.deleted_by, client.sender_address()); + assert!(deleted.timestamp > 0); + + let issue_tx = access + .for_role(role_name) + .issue_capability(CapabilityIssueOptions::default()); + let issue_after_delete = issue_tx.build_and_execute(&client).await; + assert!( + issue_after_delete.is_err(), + "issuing a capability for a deleted role must fail" + ); + Ok(()) +} + +#[tokio::test] +async fn issue_capability_with_constraints() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("access-e2e")).await?; + let role_name = "reviewer"; + + client + .create_role(trail_id, role_name, vec![Permission::AddRecord], None) + .await?; + + let issued_to = IotaAddress::random_for_testing_only(); + let constrained = CapabilityIssueOptions { + issued_to: Some(issued_to), + valid_from_ms: Some(1_700_000_000_000), + valid_until_ms: Some(1_700_000_001_000), + }; + + let issued = client.issue_cap(trail_id, role_name, constrained.clone()).await?; + + assert_eq!(issued.target_key, trail_id); + assert_eq!(issued.role, role_name.to_string()); + assert_eq!(issued.issued_to, constrained.issued_to); + assert_eq!(issued.valid_from, constrained.valid_from_ms); + assert_eq!(issued.valid_until, constrained.valid_until_ms); + + Ok(()) +} + +#[tokio::test] +async fn revoke_capability_emits_expected_event_data() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("access-e2e")).await?; + let access = client.trail(trail_id).access(); + let role_name = "revoker"; + + client + .create_role(trail_id, role_name, vec![Permission::AddRecord], None) + .await?; + + let issued = client + .issue_cap(trail_id, role_name, CapabilityIssueOptions::default()) + .await?; + + let revoked = access + .revoke_capability(issued.capability_id, issued.valid_until) 
+ .build_and_execute(&client) + .await? + .output; + assert_eq!(revoked.target_key, trail_id); + assert_eq!(revoked.capability_id, issued.capability_id); + assert_eq!(revoked.valid_until, 0); + + Ok(()) +} + +#[tokio::test] +async fn destroy_capability_emits_expected_event_data() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("access-e2e")).await?; + let access = client.trail(trail_id).access(); + let role_name = "destroyer"; + + client + .create_role(trail_id, role_name, vec![Permission::AddRecord], None) + .await?; + + let issued = client + .issue_cap(trail_id, role_name, CapabilityIssueOptions::default()) + .await?; + + let destroyed = access + .destroy_capability(issued.capability_id) + .build_and_execute(&client) + .await? + .output; + + assert_eq!(destroyed.target_key, trail_id); + assert_eq!(destroyed.capability_id, issued.capability_id); + assert_eq!(destroyed.role, role_name.to_string()); + assert_eq!(destroyed.issued_to, None); + assert_eq!(destroyed.valid_from, None); + assert_eq!(destroyed.valid_until, None); + + Ok(()) +} + +#[tokio::test] +async fn destroy_initial_admin_capability_emits_expected_event() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("access-e2e")).await?; + let access = client.trail(trail_id).access(); + + let admin_cap_ref = client.get_cap(client.sender_address(), trail_id).await?; + let admin_cap_id = admin_cap_ref.0; + + let destroyed = access + .destroy_initial_admin_capability(admin_cap_id) + .build_and_execute(&client) + .await? 
+ .output; + + assert_eq!(destroyed.target_key, trail_id); + assert_eq!(destroyed.capability_id, admin_cap_id); + assert_eq!(destroyed.role, "Admin".to_string()); + assert_eq!(destroyed.issued_to, None); + assert_eq!(destroyed.valid_from, None); + assert_eq!(destroyed.valid_until, None); + + Ok(()) +} + +#[tokio::test] +async fn revoke_initial_admin_capability_emits_expected_event() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("access-e2e")).await?; + + // Issue a second admin capability so we can use the original to revoke it + let second_admin = client + .issue_cap(trail_id, "Admin", CapabilityIssueOptions::default()) + .await?; + + let access = client.trail(trail_id).access(); + let revoked = access + .revoke_initial_admin_capability(second_admin.capability_id, second_admin.valid_until) + .build_and_execute(&client) + .await? + .output; + + assert_eq!(revoked.target_key, trail_id); + assert_eq!(revoked.capability_id, second_admin.capability_id); + assert_eq!(revoked.valid_until, 0); + + Ok(()) +} + +#[tokio::test] +async fn regular_destroy_rejects_initial_admin_capability() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("access-e2e")).await?; + let access = client.trail(trail_id).access(); + + let admin_cap_ref = client.get_cap(client.sender_address(), trail_id).await?; + let admin_cap_id = admin_cap_ref.0; + + let result = access.destroy_capability(admin_cap_id).build_and_execute(&client).await; + + assert!( + result.is_err(), + "destroying an initial admin cap via regular destroy_capability must fail" + ); + + Ok(()) +} + +#[tokio::test] +async fn regular_revoke_rejects_initial_admin_capability() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("access-e2e")).await?; + let access = client.trail(trail_id).access(); + + let 
admin_cap_ref = client.get_cap(client.sender_address(), trail_id).await?; + let admin_cap_id = admin_cap_ref.0; + + let result = access + .revoke_capability(admin_cap_id, None) + .build_and_execute(&client) + .await; + + assert!( + result.is_err(), + "revoking an initial admin cap via regular revoke_capability must fail" + ); + + Ok(()) +} + +#[tokio::test] +async fn cleanup_revoked_capabilities_removes_expired_entries() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("access-e2e")).await?; + let access = client.trail(trail_id).access(); + let role_name = "cleanup-target"; + + client + .create_role(trail_id, role_name, vec![Permission::AddRecord], None) + .await?; + + let issued = client + .issue_cap( + trail_id, + role_name, + CapabilityIssueOptions { + issued_to: None, + valid_from_ms: None, + valid_until_ms: Some(1), + }, + ) + .await?; + + access + .revoke_capability(issued.capability_id, issued.valid_until) + .build_and_execute(&client) + .await?; + + let trail = client.trail(trail_id); + let before_cleanup = trail.get().await?; + assert_eq!(before_cleanup.roles.revoked_capabilities.size, 1); + + access.cleanup_revoked_capabilities().build_and_execute(&client).await?; + + let after_cleanup = trail.get().await?; + assert_eq!(after_cleanup.roles.revoked_capabilities.size, 0); + + Ok(()) +} diff --git a/audit-trail-rs/tests/e2e/client.rs b/audit-trail-rs/tests/e2e/client.rs new file mode 100644 index 00000000..0bca63b4 --- /dev/null +++ b/audit-trail-rs/tests/e2e/client.rs @@ -0,0 +1,302 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashSet; +use std::ops::Deref; +use std::str::FromStr; +use std::sync::Arc; + +use anyhow::{Context, anyhow}; +use audit_trail::core::types::{ + Capability, CapabilityIssueOptions, CapabilityIssued, Data, InitialRecord, Permission, PermissionSet, RoleCreated, + RoleTags, +}; +use 
audit_trail::{AuditTrailClient, PackageOverrides}; +use iota_interaction::types::base_types::{IotaAddress, ObjectID, ObjectRef}; +use iota_interaction::types::crypto::PublicKey; +use iota_interaction::{IOTA_LOCAL_NETWORK_URL, IotaClient, IotaClientBuilder}; +use iota_interaction_rust::IotaClientAdapter; +use product_common::core_client::{CoreClient, CoreClientReadOnly}; +use product_common::network_name::NetworkName; +use product_common::test_utils::{InMemSigner, request_funds}; +use tokio::fs; +use tokio::process::Command; +use tokio::sync::OnceCell; + +static PACKAGE_IDS: OnceCell = OnceCell::const_new(); + +/// Script file for publishing the package. +pub const PUBLISH_SCRIPT_FILE: &str = concat!( + env!("CARGO_MANIFEST_DIR"), + "/../audit-trail-move/scripts/publish_package.sh" +); + +const CACHED_PKG_FILE: &str = "/tmp/audit_trail_pkg_ids.txt"; + +#[derive(Clone, Copy)] +struct PublishedPackageIds { + audit_trail_package_id: ObjectID, + tf_components_package_id: Option, +} + +pub async fn get_funded_test_client() -> anyhow::Result { + TestClient::new().await +} + +async fn load_cached_package_ids(chain_id: &str) -> anyhow::Result { + let cache = fs::read_to_string(CACHED_PKG_FILE).await?; + let mut parts = cache.trim().split(';'); + let audit_trail_package_id = parts + .next() + .ok_or_else(|| anyhow!("missing audit_trail package ID in cache"))?; + let tf_components_package_id = parts.next().unwrap_or_default(); + let cached_chain_id = parts.next().ok_or_else(|| anyhow!("missing chain ID in cache"))?; + + if cached_chain_id != chain_id { + anyhow::bail!("cached package IDs belong to a different chain"); + } + + Ok(PublishedPackageIds { + audit_trail_package_id: ObjectID::from_str(audit_trail_package_id) + .context("failed to parse cached audit_trail package ID")?, + tf_components_package_id: if tf_components_package_id.is_empty() { + None + } else { + Some( + ObjectID::from_str(tf_components_package_id) + .context("failed to parse cached TfComponents package 
ID")?, + ) + }, + }) +} + +async fn publish_package_ids(iota_client: &IotaClient) -> anyhow::Result { + let chain_id = iota_client + .read_api() + .get_chain_identifier() + .await + .map_err(|e| anyhow!(e.to_string()))?; + + if let Ok(ids) = load_cached_package_ids(&chain_id).await { + return Ok(ids); + } + + let output = Command::new("bash") + .arg(PUBLISH_SCRIPT_FILE) + .output() + .await + .context("failed to execute publish_package.sh")?; + + let stdout = std::str::from_utf8(&output.stdout).context("publish script stdout is not valid utf-8")?; + + if !output.status.success() { + let stderr = std::str::from_utf8(&output.stderr).context("publish script stderr is not valid utf-8")?; + anyhow::bail!("failed to publish move package: \n\n{stdout}\n\n{stderr}"); + } + + let mut audit_trail_package_id = None; + let mut tf_components_package_id = None; + + for line in stdout.lines() { + let Some(exported) = line.strip_prefix("export ") else { + continue; + }; + let Some((key, value)) = exported.split_once('=') else { + continue; + }; + + match key { + "IOTA_AUDIT_TRAIL_PKG_ID" => { + let package_id = + ObjectID::from_str(value).context("failed to parse published audit_trail package ID")?; + audit_trail_package_id = Some(package_id); + } + "IOTA_TF_COMPONENTS_PKG_ID" => { + let package_id = + ObjectID::from_str(value).context("failed to parse published TfComponents package ID")?; + tf_components_package_id = Some(package_id); + } + _ => {} + } + } + + let ids = PublishedPackageIds { + audit_trail_package_id: audit_trail_package_id + .ok_or_else(|| anyhow!("publish script did not expose IOTA_AUDIT_TRAIL_PKG_ID"))?, + tf_components_package_id, + }; + + fs::write( + CACHED_PKG_FILE, + format!( + "{};{};{}", + ids.audit_trail_package_id, + ids.tf_components_package_id + .map(|package_id| package_id.to_string()) + .unwrap_or_default(), + chain_id + ), + ) + .await + .context("failed to write cached package IDs")?; + + Ok(ids) +} + +#[derive(Clone)] +pub struct TestClient { + 
client: Arc>, +} + +impl Deref for TestClient { + type Target = AuditTrailClient; + fn deref(&self) -> &Self::Target { + &self.client + } +} + +impl TestClient { + pub async fn new() -> anyhow::Result { + let api_endpoint = std::env::var("API_ENDPOINT").unwrap_or_else(|_| IOTA_LOCAL_NETWORK_URL.to_string()); + let iota_client = IotaClientBuilder::default().build(&api_endpoint).await?; + let package_ids = PACKAGE_IDS + .get_or_try_init(|| publish_package_ids(&iota_client)) + .await + .copied()?; + + // Use a dedicated ephemeral signer per test to avoid object-lock contention. + let signer = InMemSigner::new(); + let signer_address = signer.get_address().await?; + request_funds(&signer_address).await?; + + let client = AuditTrailClient::from_iota_client( + iota_client.clone(), + Some(PackageOverrides { + audit_trail: Some(package_ids.audit_trail_package_id), + tf_component: package_ids.tf_components_package_id, + }), + ) + .await?; + let client = client.with_signer(signer).await?; + + Ok(TestClient { + client: Arc::new(client), + }) + } + + pub(crate) async fn get_cap(&self, owner: IotaAddress, trail_id: ObjectID) -> anyhow::Result { + let cap: Capability = self + .client + .find_object_for_address(owner, |cap: &Capability| cap.target_key == trail_id) + .await + .map_err(|e| anyhow::anyhow!("Failed to find accredit cap for owner {owner} and trail {trail_id}: {e}"))? + .ok_or_else(|| anyhow::anyhow!("No accredit capability found for owner {owner} and trail {trail_id}"))?; + + let object_id = *cap.id.object_id(); + + Ok(self + .client + .get_object_ref_by_id(object_id) + .await + .map_err(|e| anyhow::anyhow!("Failed to get object ref for accredit cap: {e}"))? + .map(|owned_ref| owned_ref.reference.to_object_ref()) + .unwrap()) + } + + /// Creates a trail with the given initial record data and returns its ObjectID. 
+ pub(crate) async fn create_test_trail(&self, data: Data) -> anyhow::Result { + self.create_test_trail_with_tags(data, std::iter::empty::()) + .await + } + + /// Creates a trail with the given initial record data and available tags. + pub(crate) async fn create_test_trail_with_tags(&self, data: Data, tags: I) -> anyhow::Result + where + I: IntoIterator, + S: Into, + { + let created = self + .create_trail() + .with_initial_record(InitialRecord::new(data, None, None)) + .with_record_tags(tags) + .finish() + .build_and_execute(self) + .await? + .output; + Ok(created.trail_id) + } + + /// Creates a role on the given trail with the specified permissions and optional role tags. + pub(crate) async fn create_role( + &self, + trail_id: ObjectID, + role_name: &str, + permissions: impl IntoIterator, + role_tags: Option, + ) -> anyhow::Result { + let created = self + .trail(trail_id) + .access() + .for_role(role_name) + .create( + PermissionSet { + permissions: permissions.into_iter().collect::>(), + }, + role_tags, + ) + .build_and_execute(self) + .await? + .output; + Ok(created) + } + + /// Issues a capability for the given role on the trail. + pub(crate) async fn issue_cap( + &self, + trail_id: ObjectID, + role_name: &str, + options: CapabilityIssueOptions, + ) -> anyhow::Result { + let issued = self + .trail(trail_id) + .access() + .for_role(role_name) + .issue_capability(options) + .build_and_execute(self) + .await? 
+ .output; + Ok(issued) + } +} + +impl CoreClientReadOnly for TestClient { + fn package_id(&self) -> ObjectID { + self.client.package_id() + } + + fn tf_components_package_id(&self) -> Option { + Some(self.client.tf_components_package_id()) + } + + fn network_name(&self) -> &NetworkName { + self.client.network_name() + } + + fn client_adapter(&self) -> &IotaClientAdapter { + self.client.client_adapter() + } +} + +impl CoreClient for TestClient { + fn signer(&self) -> &InMemSigner { + self.client.signer() + } + + fn sender_address(&self) -> IotaAddress { + self.client.sender_address() + } + + fn sender_public_key(&self) -> &PublicKey { + self.client.sender_public_key() + } +} diff --git a/audit-trail-rs/tests/e2e/locking.rs b/audit-trail-rs/tests/e2e/locking.rs new file mode 100644 index 00000000..2d70f8f0 --- /dev/null +++ b/audit-trail-rs/tests/e2e/locking.rs @@ -0,0 +1,422 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use audit_trail::core::types::{ + CapabilityIssueOptions, Data, InitialRecord, LockingConfig, LockingWindow, Permission, TimeLock, +}; +use iota_interaction::types::base_types::ObjectID; + +use crate::client::{TestClient, get_funded_test_client}; + +async fn grant_role_capability( + client: &TestClient, + trail_id: ObjectID, + role_name: &str, + permissions: impl IntoIterator, +) -> anyhow::Result<()> { + client.create_role(trail_id, role_name, permissions, None).await?; + client + .issue_cap(trail_id, role_name, CapabilityIssueOptions::default()) + .await?; + Ok(()) +} + +fn config_with_window(delete_record_window: LockingWindow) -> LockingConfig { + LockingConfig { + delete_record_window, + delete_trail_lock: TimeLock::None, + write_lock: TimeLock::None, + } +} + +#[tokio::test] +async fn update_locking_config_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("trail-update-locking-e2e")).await?; + let trail = 
client.trail(trail_id); + + grant_role_capability(&client, trail_id, "LockingAdmin", [Permission::UpdateLockingConfig]).await?; + + trail + .locking() + .update(config_with_window(LockingWindow::CountBased { count: 2 })) + .build_and_execute(&client) + .await?; + + let on_chain = trail.get().await?; + assert_eq!( + on_chain.locking_config, + config_with_window(LockingWindow::CountBased { count: 2 }) + ); + + Ok(()) +} + +#[tokio::test] +async fn update_locking_config_switches_count_to_time_based() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_trail() + .with_initial_record(InitialRecord::new( + Data::text("trail-switch-count-to-time-e2e"), + None, + None, + )) + .with_locking_config(config_with_window(LockingWindow::CountBased { count: 3 })) + .finish() + .build_and_execute(&client) + .await? + .output + .trail_id; + let trail = client.trail(trail_id); + + grant_role_capability(&client, trail_id, "LockingAdmin", [Permission::UpdateLockingConfig]).await?; + + let before = trail.get().await?; + assert_eq!( + before.locking_config, + config_with_window(LockingWindow::CountBased { count: 3 }) + ); + + trail + .locking() + .update(config_with_window(LockingWindow::TimeBased { seconds: 300 })) + .build_and_execute(&client) + .await?; + + let after = trail.get().await?; + assert_eq!( + after.locking_config, + config_with_window(LockingWindow::TimeBased { seconds: 300 }) + ); + + Ok(()) +} + +#[tokio::test] +async fn update_delete_record_window_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail(Data::text("trail-update-delete-window-e2e")) + .await?; + let trail = client.trail(trail_id); + + grant_role_capability( + &client, + trail_id, + "DeleteWindowAdmin", + [Permission::UpdateLockingConfigForDeleteRecord], + ) + .await?; + + trail + .locking() + .update_delete_record_window(LockingWindow::TimeBased { seconds: 120 }) + 
.build_and_execute(&client) + .await?; + + let on_chain = trail.get().await?; + assert_eq!( + on_chain.locking_config, + config_with_window(LockingWindow::TimeBased { seconds: 120 }) + ); + + Ok(()) +} + +#[tokio::test] +async fn update_delete_trail_lock_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail(Data::text("trail-update-delete-trail-lock-e2e")) + .await?; + let trail = client.trail(trail_id); + + grant_role_capability( + &client, + trail_id, + "DeleteTrailLockAdmin", + [Permission::UpdateLockingConfigForDeleteTrail], + ) + .await?; + + trail + .locking() + .update_delete_trail_lock(TimeLock::Infinite) + .build_and_execute(&client) + .await?; + + let on_chain = trail.get().await?; + assert_eq!( + on_chain.locking_config, + LockingConfig { + delete_record_window: LockingWindow::None, + delete_trail_lock: TimeLock::Infinite, + write_lock: TimeLock::None, + } + ); + + Ok(()) +} + +#[tokio::test] +async fn update_write_lock_roundtrip_and_blocks_add_record() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail(Data::text("trail-update-write-lock-e2e")) + .await?; + let trail = client.trail(trail_id); + + grant_role_capability( + &client, + trail_id, + "WriteLockAdmin", + [Permission::UpdateLockingConfigForWrite, Permission::AddRecord], + ) + .await?; + + trail + .locking() + .update_write_lock(TimeLock::Infinite) + .build_and_execute(&client) + .await?; + + let on_chain = trail.get().await?; + assert_eq!( + on_chain.locking_config, + LockingConfig { + delete_record_window: LockingWindow::None, + delete_trail_lock: TimeLock::None, + write_lock: TimeLock::Infinite, + } + ); + + let add_locked = trail + .records() + .add(Data::text("should-fail-write-locked"), None, None) + .build_and_execute(&client) + .await; + assert!(add_locked.is_err(), "write lock should block adding new records"); + + Ok(()) +} + +#[tokio::test] +async 
fn update_locking_config_requires_permission() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail(Data::text("trail-locking-permission-e2e")) + .await?; + + let result = client + .trail(trail_id) + .locking() + .update(config_with_window(LockingWindow::TimeBased { seconds: 60 })) + .build_and_execute(&client) + .await; + + assert!( + result.is_err(), + "updating locking config without UpdateLockingConfig permission must fail" + ); + + Ok(()) +} + +#[tokio::test] +async fn update_write_lock_requires_permission() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail(Data::text("trail-write-lock-permission-e2e")) + .await?; + + let update_result = client + .trail(trail_id) + .locking() + .update_write_lock(TimeLock::Infinite) + .build_and_execute(&client) + .await; + + assert!( + update_result.is_err(), + "updating write lock without UpdateLockingConfigForWrite permission must fail" + ); + + Ok(()) +} + +#[tokio::test] +async fn is_record_locked_supports_count_window_and_missing_sequence() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("trail-locking-status-e2e"), None, None)) + .with_locking_config(config_with_window(LockingWindow::CountBased { count: 2 })) + .finish() + .build_and_execute(&client) + .await? 
+ .output + .trail_id; + let trail = client.trail(trail_id); + + grant_role_capability(&client, trail_id, "RecordWriter", [Permission::AddRecord]).await?; + + trail + .records() + .add(Data::text("record-1"), None, None) + .build_and_execute(&client) + .await?; + trail + .records() + .add(Data::text("record-2"), None, None) + .build_and_execute(&client) + .await?; + + assert!( + !trail.locking().is_record_locked(0).await?, + "oldest record should be unlocked with count window of 2 and total records of 3" + ); + assert!( + trail.locking().is_record_locked(2).await?, + "latest record should be locked with count window of 2" + ); + + let missing = trail.locking().is_record_locked(999).await; + assert!(missing.is_err(), "missing sequence should fail"); + + Ok(()) +} + +#[tokio::test] +async fn delete_window_variants_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail(Data::text("trail-locking-window-variants-e2e")) + .await?; + let trail = client.trail(trail_id); + + grant_role_capability( + &client, + trail_id, + "DeleteWindowAdmin", + [Permission::UpdateLockingConfigForDeleteRecord], + ) + .await?; + + trail + .locking() + .update_delete_record_window(LockingWindow::TimeBased { seconds: 3600 }) + .build_and_execute(&client) + .await?; + + let on_chain = trail.get().await?; + assert_eq!( + on_chain.locking_config, + config_with_window(LockingWindow::TimeBased { seconds: 3600 }) + ); + + trail + .locking() + .update_delete_record_window(LockingWindow::CountBased { count: 1 }) + .build_and_execute(&client) + .await?; + + let on_chain = trail.get().await?; + assert_eq!( + on_chain.locking_config, + config_with_window(LockingWindow::CountBased { count: 1 }) + ); + + trail + .locking() + .update_delete_record_window(LockingWindow::None) + .build_and_execute(&client) + .await?; + + let on_chain = trail.get().await?; + assert_eq!(on_chain.locking_config, config_with_window(LockingWindow::None)); + + 
Ok(()) +} + +#[tokio::test] +async fn updated_time_lock_blocks_record_deletion() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail(Data::text("trail-locking-delete-time-e2e")) + .await?; + let trail = client.trail(trail_id); + + grant_role_capability( + &client, + trail_id, + "LockAndDeleteAdmin", + [ + Permission::AddRecord, + Permission::DeleteRecord, + Permission::UpdateLockingConfig, + ], + ) + .await?; + + trail + .records() + .add("deletable-before-lock".into(), None, None) + .build_and_execute(&client) + .await?; + + trail + .locking() + .update(config_with_window(LockingWindow::TimeBased { seconds: 3600 })) + .build_and_execute(&client) + .await?; + + let delete_locked = trail.records().delete(1).build_and_execute(&client).await; + assert!( + delete_locked.is_err(), + "deleting a record should fail after enabling a time-based delete lock" + ); + assert_eq!(trail.records().record_count().await?, 2); + + Ok(()) +} + +#[tokio::test] +async fn updated_delete_window_can_block_and_then_allow_deletion() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail(Data::text("trail-locking-delete-window-e2e")) + .await?; + let trail = client.trail(trail_id); + + grant_role_capability( + &client, + trail_id, + "DeleteWindowAdmin", + [Permission::DeleteRecord, Permission::UpdateLockingConfigForDeleteRecord], + ) + .await?; + + trail + .locking() + .update_delete_record_window(LockingWindow::CountBased { count: 1 }) + .build_and_execute(&client) + .await?; + + let delete_locked = trail.records().delete(0).build_and_execute(&client).await; + assert!( + delete_locked.is_err(), + "count-based window should block deleting the latest record" + ); + + trail + .locking() + .update_delete_record_window(LockingWindow::None) + .build_and_execute(&client) + .await?; + + trail.records().delete(0).build_and_execute(&client).await?; + 
assert_eq!(trail.records().record_count().await?, 0); + + Ok(()) +} diff --git a/audit-trail-rs/tests/e2e/main.rs b/audit-trail-rs/tests/e2e/main.rs new file mode 100644 index 00000000..cab94322 --- /dev/null +++ b/audit-trail-rs/tests/e2e/main.rs @@ -0,0 +1,8 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +mod access; +mod client; +mod locking; +mod records; +mod trail; diff --git a/audit-trail-rs/tests/e2e/records.rs b/audit-trail-rs/tests/e2e/records.rs new file mode 100644 index 00000000..ea798b9c --- /dev/null +++ b/audit-trail-rs/tests/e2e/records.rs @@ -0,0 +1,1335 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::time::{SystemTime, UNIX_EPOCH}; + +use audit_trail::core::types::{ + CapabilityIssueOptions, Data, InitialRecord, LockingConfig, LockingWindow, Permission, RoleTags, TimeLock, +}; +use audit_trail::error::Error; +use iota_interaction::types::base_types::ObjectID; +use product_common::core_client::CoreClient; +use tokio::time::{Duration, sleep}; + +use crate::client::{TestClient, get_funded_test_client}; + +async fn grant_role_capability( + client: &TestClient, + trail_id: ObjectID, + role_name: &str, + permissions: impl IntoIterator, +) -> anyhow::Result<()> { + client.create_role(trail_id, role_name, permissions, None).await?; + client + .issue_cap(trail_id, role_name, CapabilityIssueOptions::default()) + .await?; + Ok(()) +} + +fn assert_text_data(data: Data, expected: &str) { + match data { + Data::Text(actual) => assert_eq!(actual, expected), + other => panic!("expected text data, got {other:?}"), + } +} + +fn assert_bytes_data(data: Data, expected: &[u8]) { + match data { + Data::Bytes(actual) => assert_eq!(actual, expected), + other => panic!("expected bytes data, got {other:?}"), + } +} + +fn config_with_window(delete_record_window: LockingWindow) -> LockingConfig { + LockingConfig { + delete_record_window, + delete_trail_lock: TimeLock::None, + write_lock: 
TimeLock::None, + } +} + +#[tokio::test] +async fn add_and_fetch_record_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("records-e2e")).await?; + let records = client.trail(trail_id).records(); + + grant_role_capability(&client, trail_id, "RecordWriter", [Permission::AddRecord]).await?; + + let added = records + .add(Data::text("second record"), Some("second metadata".to_string()), None) + .build_and_execute(&client) + .await? + .output; + + assert_eq!(added.trail_id, trail_id); + assert_eq!(added.sequence_number, 1); + assert_eq!(added.added_by, client.sender_address()); + assert!(added.timestamp > 0); + + let record = records.get(1).await?; + assert_eq!(record.sequence_number, 1); + assert_eq!(record.metadata, Some("second metadata".to_string())); + assert_eq!(record.added_by, client.sender_address()); + assert!(record.added_at > 0); + assert_text_data(record.data, "second record"); + + assert_eq!(records.record_count().await?, 2); + + Ok(()) +} + +#[tokio::test] +async fn add_and_fetch_tagged_record_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail_with_tags(Data::text("records-tagged"), ["finance"]) + .await?; + let records = client.trail(trail_id).records(); + + client + .create_role( + trail_id, + "TaggedWriter", + [Permission::AddRecord], + Some(RoleTags::new(["finance"])), + ) + .await?; + client + .issue_cap(trail_id, "TaggedWriter", CapabilityIssueOptions::default()) + .await?; + + let added = records + .add( + Data::text("finance record"), + Some("tagged metadata".to_string()), + Some("finance".to_string()), + ) + .build_and_execute(&client) + .await? 
+ .output; + + assert_eq!(added.trail_id, trail_id); + assert_eq!(added.sequence_number, 1); + + let record = records.get(1).await?; + assert_eq!(record.tag, Some("finance".to_string())); + assert_eq!(record.metadata, Some("tagged metadata".to_string())); + assert_text_data(record.data, "finance record"); + + Ok(()) +} + +#[tokio::test] +async fn add_tagged_record_requires_matching_role_tag_access() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail_with_tags(Data::text("records-tagged-deny"), ["finance"]) + .await?; + let records = client.trail(trail_id).records(); + + grant_role_capability(&client, trail_id, "PlainWriter", [Permission::AddRecord]).await?; + + let denied = records + .add(Data::text("should fail"), None, Some("finance".to_string())) + .build_and_execute(&client) + .await; + + assert!(denied.is_err(), "tagged writes should require matching role tag access"); + assert_eq!(records.record_count().await?, 1); + + Ok(()) +} + +#[tokio::test] +async fn add_tagged_record_requires_trail_defined_tag() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail_with_tags(Data::text("records-tagged-undefined"), ["finance"]) + .await?; + let records = client.trail(trail_id).records(); + + client + .create_role( + trail_id, + "TaggedWriter", + [Permission::AddRecord], + Some(RoleTags::new(["finance"])), + ) + .await?; + client + .issue_cap(trail_id, "TaggedWriter", CapabilityIssueOptions::default()) + .await?; + + let denied = records + .add(Data::text("should fail"), None, Some("legal".to_string())) + .build_and_execute(&client) + .await; + + assert!( + denied.is_err(), + "tagged writes should require the tag to be defined on the trail" + ); + assert_eq!(records.record_count().await?, 1); + + Ok(()) +} + +#[tokio::test] +async fn add_record_requires_add_record_permission() -> anyhow::Result<()> { + let admin = 
get_funded_test_client().await?; + let writer = get_funded_test_client().await?; + let trail_id = admin.create_test_trail(Data::text("records-add-permission")).await?; + let records = writer.trail(trail_id).records(); + + admin + .create_role(trail_id, "NoAddRecord", [Permission::DeleteRecord], None) + .await?; + admin + .issue_cap( + trail_id, + "NoAddRecord", + CapabilityIssueOptions { + issued_to: Some(writer.sender_address()), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + let denied = records + .add(Data::text("should fail"), None, None) + .build_and_execute(&writer) + .await; + + assert!(denied.is_err(), "adding without AddRecord permission must fail"); + assert_eq!(admin.trail(trail_id).records().record_count().await?, 1); + + Ok(()) +} + +#[tokio::test] +async fn add_record_selector_skips_revoked_capability_when_valid_one_exists() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + // Untagged record flow. + let trail_id = client.create_test_trail(Data::text("records-revoked-selector")).await?; + let records = client.trail(trail_id).records(); + let role_name = "RecordWriter"; + + client + .create_role(trail_id, role_name, [Permission::AddRecord], None) + .await?; + + // Revoked capability. + let revoked_cap = client + .issue_cap(trail_id, role_name, CapabilityIssueOptions::default()) + .await?; + client + .trail(trail_id) + .access() + .revoke_capability(revoked_cap.capability_id, revoked_cap.valid_until) + .build_and_execute(&client) + .await?; + + // Valid fallback capability. + client + .issue_cap(trail_id, role_name, CapabilityIssueOptions::default()) + .await?; + + let added = records + .add(Data::text("writer record"), None, None) + .build_and_execute(&client) + .await? 
+ .output; + + assert_eq!(added.sequence_number, 1); + assert_text_data(records.get(1).await?.data, "writer record"); + + Ok(()) +} + +#[tokio::test] +async fn revoked_capability_cannot_add_record_without_fallback() -> anyhow::Result<()> { + let admin = get_funded_test_client().await?; + let writer = get_funded_test_client().await?; + let trail_id = admin.create_test_trail(Data::text("records-revoked-hard-fail")).await?; + let records = writer.trail(trail_id).records(); + let role_name = "RecordWriter"; + + admin + .create_role(trail_id, role_name, [Permission::AddRecord], None) + .await?; + let issued = admin + .issue_cap( + trail_id, + role_name, + CapabilityIssueOptions { + issued_to: Some(writer.sender_address()), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + admin + .trail(trail_id) + .access() + .revoke_capability(issued.capability_id, issued.valid_until) + .build_and_execute(&admin) + .await?; + + let denied = records + .add(Data::text("should fail"), None, None) + .build_and_execute(&writer) + .await; + + assert!(denied.is_err(), "revoked capabilities must not authorize writes"); + assert_eq!(admin.trail(trail_id).records().record_count().await?, 1); + + Ok(()) +} + +#[tokio::test] +async fn add_tagged_record_skips_revoked_capability_when_valid_one_exists() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let tagged_trail_id = client + .create_test_trail_with_tags(Data::text("records-revoked-tagged"), ["finance"]) + .await?; + let tagged_records = client.trail(tagged_trail_id).records(); + let tagged_role_name = "TaggedWriter"; + + client + .create_role( + tagged_trail_id, + tagged_role_name, + [Permission::AddRecord], + Some(RoleTags::new(["finance"])), + ) + .await?; + + // Revoked capability. 
+ let revoked_tagged_cap = client + .issue_cap(tagged_trail_id, tagged_role_name, CapabilityIssueOptions::default()) + .await?; + client + .trail(tagged_trail_id) + .access() + .revoke_capability(revoked_tagged_cap.capability_id, revoked_tagged_cap.valid_until) + .build_and_execute(&client) + .await?; + + // Valid fallback capability. + client + .issue_cap(tagged_trail_id, tagged_role_name, CapabilityIssueOptions::default()) + .await?; + + let tagged_added = tagged_records + .add( + Data::text("finance entry"), + Some("tagged".to_string()), + Some("finance".to_string()), + ) + .build_and_execute(&client) + .await? + .output; + + assert_eq!(tagged_added.sequence_number, 1); + assert_eq!(tagged_records.get(1).await?.tag, Some("finance".to_string())); + + Ok(()) +} + +#[tokio::test] +async fn add_record_selector_skips_expired_capability_when_valid_one_exists() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let now_ms = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_millis() as u64; + + // Untagged record flow. + let trail_id = client.create_test_trail(Data::text("records-expired-selector")).await?; + let records = client.trail(trail_id).records(); + let role_name = "RecordWriter"; + + client + .create_role(trail_id, role_name, [Permission::AddRecord], None) + .await?; + + // Expired capability. + client + .issue_cap( + trail_id, + role_name, + CapabilityIssueOptions { + valid_until_ms: Some(now_ms.saturating_sub(60_000)), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + // Valid fallback capability. + client + .issue_cap(trail_id, role_name, CapabilityIssueOptions::default()) + .await?; + + let added = records + .add(Data::text("writer record"), None, None) + .build_and_execute(&client) + .await? + .output; + + assert_eq!(added.sequence_number, 1); + assert_text_data(records.get(1).await?.data, "writer record"); + + // Tagged record flow. 
+ let tagged_trail_id = client + .create_test_trail_with_tags(Data::text("records-expired-tagged"), ["finance"]) + .await?; + let tagged_records = client.trail(tagged_trail_id).records(); + let tagged_role_name = "TaggedWriter"; + + client + .create_role( + tagged_trail_id, + tagged_role_name, + [Permission::AddRecord], + Some(RoleTags::new(["finance"])), + ) + .await?; + + // Expired capability. + client + .issue_cap( + tagged_trail_id, + tagged_role_name, + CapabilityIssueOptions { + valid_until_ms: Some(now_ms.saturating_sub(60_000)), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + // Valid fallback capability. + client + .issue_cap(tagged_trail_id, tagged_role_name, CapabilityIssueOptions::default()) + .await?; + + let tagged_added = tagged_records + .add( + Data::text("finance entry"), + Some("tagged".to_string()), + Some("finance".to_string()), + ) + .build_and_execute(&client) + .await? + .output; + + assert_eq!(tagged_added.sequence_number, 1); + assert_eq!(tagged_records.get(1).await?.tag, Some("finance".to_string())); + + Ok(()) +} + +#[tokio::test] +async fn add_record_using_capability_uses_selected_capability_without_fallback() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let now_ms = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_millis() as u64; + + // Untagged record flow. 
+ let trail_id = client + .create_test_trail(Data::text("records-explicit-cap-selector")) + .await?; + let role_name = "RecordWriter"; + + client + .create_role(trail_id, role_name, [Permission::AddRecord], None) + .await?; + + let expired_cap = client + .issue_cap( + trail_id, + role_name, + CapabilityIssueOptions { + valid_until_ms: Some(now_ms.saturating_sub(60_000)), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + let valid_cap = client + .issue_cap(trail_id, role_name, CapabilityIssueOptions::default()) + .await?; + + let denied = client + .trail(trail_id) + .records() + .using_capability(expired_cap.capability_id) + .add(Data::text("should fail"), None, None) + .build_and_execute(&client) + .await; + + assert!( + denied.is_err(), + "explicit capability selection should not fall back when the chosen capability is expired" + ); + + let added = client + .trail(trail_id) + .records() + .using_capability(valid_cap.capability_id) + .add(Data::text("writer record"), None, None) + .build_and_execute(&client) + .await? + .output; + + assert_eq!(added.sequence_number, 1); + assert_text_data(client.trail(trail_id).records().get(1).await?.data, "writer record"); + + // Tagged record flow. 
+ let tagged_trail_id = client + .create_test_trail_with_tags(Data::text("records-explicit-cap-tagged"), ["finance"]) + .await?; + let tagged_role_name = "TaggedWriter"; + + client + .create_role( + tagged_trail_id, + tagged_role_name, + [Permission::AddRecord], + Some(RoleTags::new(["finance"])), + ) + .await?; + + let expired_tagged_cap = client + .issue_cap( + tagged_trail_id, + tagged_role_name, + CapabilityIssueOptions { + valid_until_ms: Some(now_ms.saturating_sub(60_000)), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + let valid_tagged_cap = client + .issue_cap(tagged_trail_id, tagged_role_name, CapabilityIssueOptions::default()) + .await?; + + let tagged_denied = client + .trail(tagged_trail_id) + .records() + .using_capability(expired_tagged_cap.capability_id) + .add( + Data::text("should fail"), + Some("tagged".to_string()), + Some("finance".to_string()), + ) + .build_and_execute(&client) + .await; + + assert!( + tagged_denied.is_err(), + "tagged writes should also use the explicitly selected capability without fallback" + ); + + let tagged_added = client + .trail(tagged_trail_id) + .records() + .using_capability(valid_tagged_cap.capability_id) + .add( + Data::text("finance entry"), + Some("tagged".to_string()), + Some("finance".to_string()), + ) + .build_and_execute(&client) + .await? 
+ .output; + + assert_eq!(tagged_added.sequence_number, 1); + assert_eq!( + client.trail(tagged_trail_id).records().get(1).await?.tag, + Some("finance".to_string()) + ); + + Ok(()) +} + +#[tokio::test] +async fn add_record_respects_valid_from_constraint() -> anyhow::Result<()> { + let admin = get_funded_test_client().await?; + let writer = get_funded_test_client().await?; + let trail_id = admin.create_test_trail(Data::text("records-valid-from")).await?; + let records = writer.trail(trail_id).records(); + let role_name = "RecordWriter"; + + admin + .create_role(trail_id, role_name, [Permission::AddRecord], None) + .await?; + let valid_from_ms = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH)? + .as_millis() as u64 + + 15_000; + admin + .issue_cap( + trail_id, + role_name, + CapabilityIssueOptions { + issued_to: Some(writer.sender_address()), + valid_from_ms: Some(valid_from_ms), + valid_until_ms: None, + }, + ) + .await?; + + let denied = records + .add(Data::text("too early"), None, None) + .build_and_execute(&writer) + .await; + assert!(denied.is_err(), "writes before valid_from must fail"); + + sleep(Duration::from_secs(16)).await; + + let added = records + .add(Data::text("on time"), None, None) + .build_and_execute(&writer) + .await? + .output; + + assert_eq!(added.sequence_number, 1); + assert_text_data(records.get(1).await?.data, "on time"); + + Ok(()) +} + +#[tokio::test] +async fn add_record_respects_valid_until_constraint() -> anyhow::Result<()> { + let admin = get_funded_test_client().await?; + let writer = get_funded_test_client().await?; + let trail_id = admin.create_test_trail(Data::text("records-valid-until")).await?; + let records = writer.trail(trail_id).records(); + let role_name = "RecordWriter"; + + admin + .create_role(trail_id, role_name, [Permission::AddRecord], None) + .await?; + let valid_until_ms = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH)? 
+ .as_millis() as u64 + + 15_000; + admin + .issue_cap( + trail_id, + role_name, + CapabilityIssueOptions { + issued_to: Some(writer.sender_address()), + valid_from_ms: None, + valid_until_ms: Some(valid_until_ms), + }, + ) + .await?; + + let added = records + .add(Data::text("before expiry"), None, None) + .build_and_execute(&writer) + .await? + .output; + assert_eq!(added.sequence_number, 1); + + sleep(Duration::from_secs(16)).await; + + let denied = records + .add(Data::text("after expiry"), None, None) + .build_and_execute(&writer) + .await; + assert!(denied.is_err(), "writes after valid_until must fail"); + + Ok(()) +} + +#[tokio::test] +async fn add_record_allows_mixed_data_variants() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("text-trail")).await?; + let records = client.trail(trail_id).records(); + + grant_role_capability(&client, trail_id, "RecordWriter", [Permission::AddRecord]).await?; + + let added = records + .add( + Data::bytes(vec![0xFF, 0x00, 0xAA]), + Some("binary payload".to_string()), + None, + ) + .build_and_execute(&client) + .await? + .output; + + assert_eq!(added.sequence_number, 1); + assert_eq!(records.record_count().await?, 2); + assert_bytes_data(records.get(1).await?.data, &[0xFF, 0x00, 0xAA]); + + Ok(()) +} + +#[tokio::test] +async fn add_and_fetch_bytes_record_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::bytes(vec![0x10, 0x20, 0x30])).await?; + let records = client.trail(trail_id).records(); + + grant_role_capability(&client, trail_id, "RecordWriter", [Permission::AddRecord]).await?; + + let added = records + .add( + Data::bytes(vec![0xFF, 0x00, 0xAA]), + Some("binary payload".to_string()), + None, + ) + .build_and_execute(&client) + .await? 
+ .output; + + assert_eq!(added.sequence_number, 1); + assert_eq!(records.record_count().await?, 2); + + let record = records.get(1).await?; + assert_eq!(record.metadata, Some("binary payload".to_string())); + assert_bytes_data(record.data, &[0xFF, 0x00, 0xAA]); + + Ok(()) +} + +#[tokio::test] +async fn get_missing_record_fails() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("missing-get")).await?; + let records = client.trail(trail_id).records(); + + let missing = records.get(999).await; + assert!(missing.is_err(), "reading a missing sequence must fail"); + + Ok(()) +} + +#[tokio::test] +async fn delete_record_removes_entry_and_keeps_sequence_monotonic() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("delete-roundtrip")).await?; + let records = client.trail(trail_id).records(); + + grant_role_capability( + &client, + trail_id, + "RecordAdmin", + [Permission::AddRecord, Permission::DeleteRecord], + ) + .await?; + + let added = records + .add(Data::text("surviving record"), Some("keep me".to_string()), None) + .build_and_execute(&client) + .await? 
+ .output; + assert_eq!(added.sequence_number, 1); + + let deleted = records.delete(0).build_and_execute(&client).await?.output; + assert_eq!(deleted.trail_id, trail_id); + assert_eq!(deleted.sequence_number, 0); + assert_eq!(deleted.deleted_by, client.sender_address()); + assert!(deleted.timestamp > 0); + + assert_eq!(records.record_count().await?, 1); + assert!(records.get(0).await.is_err(), "deleted record should be gone"); + + let remaining = records.get(1).await?; + assert_eq!(remaining.sequence_number, 1); + assert_text_data(remaining.data, "surviving record"); + + let on_chain_trail = client.trail(trail_id).get().await?; + assert_eq!( + on_chain_trail.sequence_number, 2, + "sequence_number should stay monotonic even after deletion" + ); + + Ok(()) +} + +#[tokio::test] +async fn delete_tagged_record_requires_matching_role_tag_access() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail_with_tags(Data::text("delete-tagged-deny"), ["finance"]) + .await?; + + client + .create_role( + trail_id, + "TaggedWriter", + [Permission::AddRecord], + Some(RoleTags::new(["finance"])), + ) + .await?; + client + .create_role(trail_id, "DeleteOnly", [Permission::DeleteRecord], None) + .await?; + client + .issue_cap(trail_id, "TaggedWriter", CapabilityIssueOptions::default()) + .await?; + client + .issue_cap(trail_id, "DeleteOnly", CapabilityIssueOptions::default()) + .await?; + + client + .trail(trail_id) + .records() + .add(Data::text("tagged record"), None, Some("finance".to_string())) + .build_and_execute(&client) + .await?; + + let denied = client + .trail(trail_id) + .records() + .delete(1) + .build_and_execute(&client) + .await; + + assert!( + denied.is_err(), + "tagged deletes should require matching role tag access" + ); + assert_eq!(client.trail(trail_id).records().record_count().await?, 2); + assert_eq!( + client.trail(trail_id).records().get(1).await?.tag.as_deref(), + Some("finance") + ); + + Ok(()) 
+} + +#[tokio::test] +async fn delete_tagged_record_with_matching_role_tag_access_succeeds() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail_with_tags(Data::text("delete-tagged-allow"), ["finance"]) + .await?; + let records = client.trail(trail_id).records(); + + client + .create_role( + trail_id, + "TaggedRecordAdmin", + [Permission::AddRecord, Permission::DeleteRecord], + Some(RoleTags::new(["finance"])), + ) + .await?; + client + .issue_cap(trail_id, "TaggedRecordAdmin", CapabilityIssueOptions::default()) + .await?; + + records + .add(Data::text("tagged record"), None, Some("finance".to_string())) + .build_and_execute(&client) + .await?; + + let deleted = records.delete(1).build_and_execute(&client).await?.output; + assert_eq!(deleted.sequence_number, 1); + assert_eq!(records.record_count().await?, 1); + assert!(records.get(1).await.is_err()); + + Ok(()) +} + +#[tokio::test] +async fn delete_record_requires_delete_permission() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("delete-perm")).await?; + let records = client.trail(trail_id).records(); + + grant_role_capability(&client, trail_id, "AddOnly", [Permission::AddRecord]).await?; + + let delete_result = records.delete(0).build_and_execute(&client).await; + assert!( + delete_result.is_err(), + "deleting without DeleteRecord permission must fail" + ); + assert!(records.get(0).await.is_ok(), "record should still exist"); + + Ok(()) +} + +#[tokio::test] +async fn delete_record_not_found_fails() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("delete-not-found")).await?; + let records = client.trail(trail_id).records(); + + grant_role_capability(&client, trail_id, "DeleteOnly", [Permission::DeleteRecord]).await?; + + let delete_missing = records.delete(999).build_and_execute(&client).await; + 
assert!(delete_missing.is_err(), "deleting a non-existent sequence should fail"); + + Ok(()) +} + +#[tokio::test] +async fn delete_record_fails_while_time_locked() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("locked"), None, None)) + .with_locking_config(config_with_window(LockingWindow::TimeBased { seconds: 3600 })) + .finish() + .build_and_execute(&client) + .await? + .output; + let trail_id = created.trail_id; + let records = client.trail(trail_id).records(); + + grant_role_capability(&client, trail_id, "DeleteOnly", [Permission::DeleteRecord]).await?; + + let delete_locked = records.delete(0).build_and_execute(&client).await; + assert!(delete_locked.is_err(), "time-locked record deletion must fail"); + assert_eq!(records.record_count().await?, 1); + + Ok(()) +} + +#[tokio::test] +async fn sequence_numbers_do_not_reuse_deleted_slots() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("sequence-stability")).await?; + let records = client.trail(trail_id).records(); + + grant_role_capability( + &client, + trail_id, + "RecordAdmin", + [Permission::AddRecord, Permission::DeleteRecord], + ) + .await?; + + let first_added = records + .add(Data::text("first added"), None, None) + .build_and_execute(&client) + .await? + .output; + assert_eq!(first_added.sequence_number, 1); + + records.delete(1).build_and_execute(&client).await?; + + let second_added = records + .add(Data::text("second added"), None, None) + .build_and_execute(&client) + .await? 
+ .output; + assert_eq!( + second_added.sequence_number, 2, + "new records must not reuse deleted sequence slots" + ); + + assert!(records.get(1).await.is_err(), "deleted sequence should remain absent"); + assert_eq!(records.record_count().await?, 2); + assert_text_data(records.get(2).await?.data, "second added"); + + Ok(()) +} + +#[tokio::test] +async fn delete_record_fails_while_count_locked() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("count-locked"), None, None)) + .with_locking_config(config_with_window(LockingWindow::CountBased { count: 5 })) + .finish() + .build_and_execute(&client) + .await? + .output; + let trail_id = created.trail_id; + let records = client.trail(trail_id).records(); + + grant_role_capability(&client, trail_id, "DeleteOnly", [Permission::DeleteRecord]).await?; + + let delete_locked = records.delete(0).build_and_execute(&client).await; + assert!(delete_locked.is_err(), "count-locked record deletion must fail"); + assert_eq!(records.record_count().await?, 1); + + Ok(()) +} + +#[tokio::test] +async fn delete_records_batch_respects_limit_and_deletes_oldest_first() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("batch-initial"), None, None)) + .with_locking_config(config_with_window(LockingWindow::TimeBased { seconds: 3600 })) + .finish() + .build_and_execute(&client) + .await? 
+ .output; + + let trail_id = created.trail_id; + let records = client.trail(trail_id).records(); + + grant_role_capability( + &client, + trail_id, + "BatchRecordAdmin", + [Permission::AddRecord, Permission::DeleteAllRecords], + ) + .await?; + + records + .add(Data::text("batch-second"), None, None) + .build_and_execute(&client) + .await?; + records + .add(Data::text("batch-third"), None, None) + .build_and_execute(&client) + .await?; + + assert_eq!(records.record_count().await?, 3); + + let deleted_two = records.delete_records_batch(2).build_and_execute(&client).await?.output; + assert_eq!(deleted_two, 2, "batch delete should stop at the provided limit"); + assert_eq!(records.record_count().await?, 1); + assert!(records.get(0).await.is_err(), "oldest record should be removed first"); + assert!( + records.get(1).await.is_err(), + "second oldest record should also be removed" + ); + assert_text_data(records.get(2).await?.data, "batch-third"); + + let deleted_last = records + .delete_records_batch(10) + .build_and_execute(&client) + .await? + .output; + assert_eq!(deleted_last, 1, "remaining record should be deleted"); + assert_eq!(records.record_count().await?, 0); + + let deleted_empty = records + .delete_records_batch(10) + .build_and_execute(&client) + .await? 
+ .output; + assert_eq!(deleted_empty, 0, "deleting from an empty trail should return zero"); + + Ok(()) +} + +#[tokio::test] +async fn delete_records_batch_requires_delete_all_records_permission() -> anyhow::Result<()> { + let admin = get_funded_test_client().await?; + let operator = get_funded_test_client().await?; + let trail_id = admin.create_test_trail(Data::text("batch-delete-permission")).await?; + let records = operator.trail(trail_id).records(); + + admin + .create_role(trail_id, "TrailDeleteOnly", [Permission::DeleteAuditTrail], None) + .await?; + admin + .issue_cap( + trail_id, + "TrailDeleteOnly", + CapabilityIssueOptions { + issued_to: Some(operator.sender_address()), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + let denied = records.delete_records_batch(10).build_and_execute(&operator).await; + assert!( + denied.is_err(), + "batch deletion must require DeleteAllRecords permission" + ); + assert_eq!(admin.trail(trail_id).records().record_count().await?, 1); + + Ok(()) +} + +#[tokio::test] +async fn delete_records_batch_requires_matching_role_tag_access() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail_with_tags(Data::text("batch-delete-tagged-deny"), ["finance"]) + .await?; + + client + .create_role( + trail_id, + "TaggedWriter", + [Permission::AddRecord], + Some(RoleTags::new(["finance"])), + ) + .await?; + client + .create_role(trail_id, "DeleteAllWithoutTags", [Permission::DeleteAllRecords], None) + .await?; + client + .issue_cap(trail_id, "TaggedWriter", CapabilityIssueOptions::default()) + .await?; + client + .issue_cap(trail_id, "DeleteAllWithoutTags", CapabilityIssueOptions::default()) + .await?; + + client + .trail(trail_id) + .records() + .add(Data::text("tagged record"), None, Some("finance".to_string())) + .build_and_execute(&client) + .await?; + + let denied = client + .trail(trail_id) + .records() + .delete_records_batch(10) + .build_and_execute(&client) + 
.await; + + assert!( + denied.is_err(), + "tagged batch deletes should require matching role tag access" + ); + assert_eq!(client.trail(trail_id).records().record_count().await?, 2); + assert_eq!( + client.trail(trail_id).records().get(1).await?.tag.as_deref(), + Some("finance") + ); + + Ok(()) +} + +#[tokio::test] +async fn delete_records_batch_with_matching_role_tag_access_succeeds() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail_with_tags(Data::text("batch-delete-tagged-allow"), ["finance"]) + .await?; + let records = client.trail(trail_id).records(); + + client + .create_role( + trail_id, + "TaggedDeleteAll", + [Permission::AddRecord, Permission::DeleteAllRecords], + Some(RoleTags::new(["finance"])), + ) + .await?; + client + .issue_cap(trail_id, "TaggedDeleteAll", CapabilityIssueOptions::default()) + .await?; + + records + .add(Data::text("tagged record"), None, Some("finance".to_string())) + .build_and_execute(&client) + .await?; + + let deleted = records + .delete_records_batch(10) + .build_and_execute(&client) + .await? 
+ .output; + assert_eq!(deleted, 2); + assert_eq!(records.record_count().await?, 0); + assert!(records.get(1).await.is_err()); + + Ok(()) +} + +#[tokio::test] +async fn list_and_pagination_support_sparse_sequence_numbers() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("pagination")).await?; + let records = client.trail(trail_id).records(); + + grant_role_capability( + &client, + trail_id, + "RecordAdmin", + [Permission::AddRecord, Permission::DeleteRecord], + ) + .await?; + + records + .add(Data::text("second"), Some("m2".to_string()), None) + .build_and_execute(&client) + .await?; + records + .add(Data::text("third"), Some("m3".to_string()), None) + .build_and_execute(&client) + .await?; + records.delete(1).build_and_execute(&client).await?; + + assert_eq!(records.record_count().await?, 2); + + let listed = records.list().await?; + assert_eq!(listed.len(), 2); + assert!(listed.contains_key(&0)); + assert!(listed.contains_key(&2)); + + let too_small = records.list_with_limit(1).await; + assert!(too_small.is_err(), "limit below table size should fail"); + + let page_1 = records.list_page(None, 1).await?; + assert_eq!(page_1.records.len(), 1); + assert!(page_1.records.contains_key(&0)); + assert!(page_1.has_next_page); + + let page_2 = records.list_page(page_1.next_cursor, 1).await?; + assert_eq!(page_2.records.len(), 1); + assert!(page_2.records.contains_key(&2)); + assert!(!page_2.has_next_page); + assert!(page_2.next_cursor.is_none()); + + Ok(()) +} + +#[tokio::test] +async fn list_and_pagination_multi_page_through_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("pagination-multi")).await?; + let records = client.trail(trail_id).records(); + + grant_role_capability( + &client, + trail_id, + "RecordAdmin", + [Permission::AddRecord, Permission::DeleteRecord], + ) + .await?; + + for (idx, label) in 
["r1", "r2", "r3", "r4", "r5", "r6"].into_iter().enumerate() {
+ records
+ .add(
+ Data::text(format!("record-{label}")),
+ Some(format!("meta-{}", idx + 1)),
+ None,
+ )
+ .build_and_execute(&client)
+ .await?;
+ }
+
+ // Create sparse keys: 0,1,3,4,6
+ records.delete(2).build_and_execute(&client).await?;
+ records.delete(5).build_and_execute(&client).await?;
+
+ assert_eq!(records.record_count().await?, 5);
+
+ let list = records.list().await?;
+ assert_eq!(list.len(), 5);
+ assert!(list.contains_key(&0));
+ assert!(list.contains_key(&1));
+ assert!(list.contains_key(&3));
+ assert!(list.contains_key(&4));
+ assert!(list.contains_key(&6));
+ assert_text_data(
+ list.get(&4).expect("record with key 4 should exist").data.clone(),
+ "record-r4",
+ );
+
+ let limited = records.list_with_limit(5).await?;
+ assert_eq!(limited.len(), 5);
+ assert!(records.list_with_limit(4).await.is_err());
+
+ // limit=0 returns no records and keeps the traversal cursor at the starting position.
+ let empty_page = records.list_page(None, 0).await?;
+ assert!(empty_page.records.is_empty());
+ assert!(empty_page.has_next_page);
+ assert!(empty_page.next_cursor.is_some());
+
+ let page_1 = records.list_page(None, 2).await?;
+ assert_eq!(page_1.records.len(), 2);
+ assert_eq!(
+ page_1.records.keys().copied().collect::<Vec<_>>(),
+ vec![0, 1],
+ "page keys should be stable and ordered"
+ );
+ assert!(page_1.records.contains_key(&0));
+ assert!(page_1.records.contains_key(&1));
+ assert!(page_1.has_next_page);
+
+ let page_2 = records.list_page(page_1.next_cursor, 2).await?;
+ assert_eq!(page_2.records.len(), 2);
+ assert_eq!(
+ page_2.records.keys().copied().collect::<Vec<_>>(),
+ vec![3, 4],
+ "page keys should be stable and ordered"
+ );
+ assert!(page_2.records.contains_key(&3));
+ assert!(page_2.records.contains_key(&4));
+ assert!(page_2.has_next_page);
+
+ let page_3 = records.list_page(page_2.next_cursor, 2).await?;
+ assert_eq!(page_3.records.len(), 1);
+ 
assert!(page_3.records.contains_key(&6));
+ assert!(!page_3.has_next_page);
+ assert!(page_3.next_cursor.is_none());
+
+ Ok(())
+}
+
+#[tokio::test]
+async fn list_page_cursor_validation_and_mid_cursor_start() -> anyhow::Result<()> {
+ let client = get_funded_test_client().await?;
+ let trail_id = client.create_test_trail(Data::text("pagination-cursor")).await?;
+ let records = client.trail(trail_id).records();
+
+ grant_role_capability(
+ &client,
+ trail_id,
+ "RecordAdmin",
+ [Permission::AddRecord, Permission::DeleteRecord],
+ )
+ .await?;
+
+ for label in ["r1", "r2", "r3", "r4"] {
+ records
+ .add(Data::text(format!("record-{label}")), None, None)
+ .build_and_execute(&client)
+ .await?;
+ }
+
+ // Existing keys are now 0,1,2,3,4.
+ let middle_page = records.list_page(Some(2), 2).await?;
+ assert_eq!(middle_page.records.len(), 2);
+ assert_eq!(
+ middle_page.records.keys().copied().collect::<Vec<_>>(),
+ vec![2, 3],
+ "page keys should be stable and ordered"
+ );
+ assert!(middle_page.records.contains_key(&2));
+ assert!(middle_page.records.contains_key(&3));
+ assert!(middle_page.has_next_page);
+
+ // Cursors that do not exist in the linked-table should fail.
+ let invalid_cursor = records.list_page(Some(999), 1).await; + assert!(invalid_cursor.is_err(), "an invalid cursor should produce an error"); + + Ok(()) +} + +#[tokio::test] +async fn list_page_rejects_limit_above_supported_max() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("pagination-cap")).await?; + let records = client.trail(trail_id).records(); + + let result = records.list_page(None, 1_001).await; + + match result { + Err(Error::InvalidArgument(message)) => { + assert!( + message.contains("exceeds max supported page size"), + "page-size cap error should be explicit: {message}" + ); + } + Err(other) => panic!("expected InvalidArgument for oversized limit, got {other}"), + Ok(page) => panic!("expected oversized limit error, got page: {page:?}"), + } + + Ok(()) +} diff --git a/audit-trail-rs/tests/e2e/trail.rs b/audit-trail-rs/tests/e2e/trail.rs new file mode 100644 index 00000000..4ade501b --- /dev/null +++ b/audit-trail-rs/tests/e2e/trail.rs @@ -0,0 +1,593 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use audit_trail::core::types::{ + CapabilityIssueOptions, Data, ImmutableMetadata, InitialRecord, LockingConfig, LockingWindow, Permission, RoleTags, + TimeLock, +}; +use iota_interaction::types::base_types::IotaAddress; +use product_common::core_client::CoreClient; + +use crate::client::get_funded_test_client; + +fn config_with_window(delete_record_window: LockingWindow) -> LockingConfig { + LockingConfig { + delete_record_window, + delete_trail_lock: TimeLock::None, + write_lock: TimeLock::None, + } +} + +#[tokio::test] +async fn create_trail_with_default_builder_settings() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("audit-trail-create-default"), None, None)) + .finish() + .build_and_execute(&client) + .await? 
+ .output; + + assert_eq!(created.creator, client.sender_address()); + + let on_chain = created.fetch_audit_trail(&client).await?; + assert_eq!(on_chain.id.object_id(), &created.trail_id); + assert_eq!(on_chain.creator, client.sender_address()); + assert_eq!(on_chain.sequence_number, 1); + assert_eq!(on_chain.locking_config, config_with_window(LockingWindow::None)); + assert!(on_chain.immutable_metadata.is_none()); + assert!(on_chain.updatable_metadata.is_none()); + + Ok(()) +} + +#[tokio::test] +async fn create_empty_trail_with_default_builder_settings() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + let created = client.create_trail().finish().build_and_execute(&client).await?.output; + + assert_eq!(created.creator, client.sender_address()); + + let on_chain = created.fetch_audit_trail(&client).await?; + assert_eq!(on_chain.id.object_id(), &created.trail_id); + assert_eq!(on_chain.creator, client.sender_address()); + assert_eq!(on_chain.sequence_number, 0); + assert_eq!(on_chain.locking_config, config_with_window(LockingWindow::None)); + assert!(on_chain.immutable_metadata.is_none()); + assert!(on_chain.updatable_metadata.is_none()); + assert_eq!(client.trail(created.trail_id).records().record_count().await?, 0); + + Ok(()) +} + +#[tokio::test] +async fn create_trail_with_metadata_and_time_lock() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + let immutable_metadata = + ImmutableMetadata::new("Trail Time Lock".to_string(), Some("immutable description".to_string())); + + let created = client + .create_trail() + .with_initial_record(InitialRecord::new( + Data::text("audit-trail-create-time-lock"), + Some("initial record metadata".to_string()), + None, + )) + .with_locking_config(config_with_window(LockingWindow::TimeBased { seconds: 300 })) + .with_trail_metadata(immutable_metadata.clone()) + .with_updatable_metadata("updatable metadata") + .finish() + .build_and_execute(&client) + .await? 
+ .output; + + let on_chain = created.fetch_audit_trail(&client).await?; + assert_eq!( + on_chain.locking_config, + config_with_window(LockingWindow::TimeBased { seconds: 300 }) + ); + assert_eq!(on_chain.immutable_metadata, Some(immutable_metadata)); + assert_eq!(on_chain.updatable_metadata, Some("updatable metadata".to_string())); + + Ok(()) +} + +#[tokio::test] +async fn create_trail_with_bytes_and_count_lock() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + let created = client + .create_trail() + .with_initial_record(InitialRecord::new( + Data::bytes(vec![0xAA, 0xBB, 0xCC, 0xDD]), + Some("bytes metadata".to_string()), + None, + )) + .with_locking_config(config_with_window(LockingWindow::CountBased { count: 3 })) + .with_trail_metadata_parts("Trail Count Lock", Some("count lock description".to_string())) + .finish() + .build_and_execute(&client) + .await? + .output; + + let on_chain = created.fetch_audit_trail(&client).await?; + assert_eq!( + on_chain.locking_config, + config_with_window(LockingWindow::CountBased { count: 3 }) + ); + assert_eq!(on_chain.sequence_number, 1); + + Ok(()) +} + +#[tokio::test] +async fn create_trail_with_custom_admin_address() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let custom_admin = IotaAddress::random_for_testing_only(); + + let created = client + .create_trail() + .with_admin(custom_admin) + .with_initial_record(InitialRecord::new(Data::text("audit-trail-custom-admin"), None, None)) + .finish() + .build_and_execute(&client) + .await? 
+ .output; + + let cap = client.get_cap(custom_admin, created.trail_id).await; + + match cap { + Ok(cap_ref) => println!("Found admin capability with ID: {}", cap_ref.0), + Err(e) => println!("Error finding admin capability for custom admin: {e}"), + } + + Ok(()) +} + +#[tokio::test] +async fn get_returns_on_chain_trail() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("trail-get-e2e"), None, None)) + .with_trail_metadata_parts("Get Test", Some("description".into())) + .with_updatable_metadata("initial updatable") + .finish() + .build_and_execute(&client) + .await? + .output; + + let trail = client.trail(created.trail_id); + let on_chain = trail.get().await?; + + assert_eq!(on_chain.id.object_id(), &created.trail_id); + assert_eq!(on_chain.creator, created.creator); + assert_eq!(on_chain.sequence_number, 1); + assert_eq!( + on_chain.immutable_metadata, + Some(ImmutableMetadata::new( + "Get Test".to_string(), + Some("description".to_string()) + )) + ); + assert_eq!(on_chain.updatable_metadata, Some("initial updatable".to_string())); + + Ok(()) +} + +#[tokio::test] +async fn get_trail_without_metadata() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("trail-no-meta-e2e"), None, None)) + .finish() + .build_and_execute(&client) + .await? 
+ .output; + + let on_chain = client.trail(created.trail_id).get().await?; + + assert!(on_chain.immutable_metadata.is_none()); + assert!(on_chain.updatable_metadata.is_none()); + + Ok(()) +} + +#[tokio::test] +async fn migrate_is_available_on_trail_handle() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client.create_test_trail(Data::text("trail-migrate-e2e")).await?; + + let handle_migrate = client.trail(trail_id).migrate().build_and_execute(&client).await; + + assert!( + handle_migrate.is_err(), + "new trails are already on latest package version, migrate should fail" + ); + + Ok(()) +} + +#[tokio::test] +async fn update_metadata_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + let trail_id = client.create_test_trail(Data::text("trail-update-meta-e2e")).await?; + // Set initial updatable metadata via update_metadata + client + .create_role(trail_id, "MetadataAdmin", vec![Permission::UpdateMetadata], None) + .await?; + client + .issue_cap(trail_id, "MetadataAdmin", CapabilityIssueOptions::default()) + .await?; + + let trail = client.trail(trail_id); + + trail + .update_metadata(Some("before".to_string())) + .build_and_execute(&client) + .await?; + + let before = trail.get().await?; + assert_eq!(before.updatable_metadata, Some("before".to_string())); + + // Update to a new value + trail + .update_metadata(Some("after".to_string())) + .build_and_execute(&client) + .await?; + + let after = trail.get().await?; + assert_eq!(after.updatable_metadata, Some("after".to_string())); + + Ok(()) +} + +#[tokio::test] +async fn update_metadata_to_none_clears_value() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + let trail_id = client.create_test_trail(Data::text("trail-clear-meta-e2e")).await?; + client + .create_role(trail_id, "MetadataAdmin", vec![Permission::UpdateMetadata], None) + .await?; + client + .issue_cap(trail_id, "MetadataAdmin", 
CapabilityIssueOptions::default()) + .await?; + + let trail = client.trail(trail_id); + + trail + .update_metadata(Some("to-be-cleared".to_string())) + .build_and_execute(&client) + .await?; + + trail.update_metadata(None).build_and_execute(&client).await?; + + let on_chain = trail.get().await?; + assert_eq!(on_chain.updatable_metadata, None); + + Ok(()) +} + +#[tokio::test] +async fn update_metadata_multiple_times() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + let trail_id = client.create_test_trail(Data::text("trail-multi-meta-e2e")).await?; + client + .create_role(trail_id, "MetadataAdmin", vec![Permission::UpdateMetadata], None) + .await?; + client + .issue_cap(trail_id, "MetadataAdmin", CapabilityIssueOptions::default()) + .await?; + + let trail = client.trail(trail_id); + + // Set, then overwrite, then clear + trail + .update_metadata(Some("first".to_string())) + .build_and_execute(&client) + .await?; + + trail + .update_metadata(Some("second".to_string())) + .build_and_execute(&client) + .await?; + + trail.update_metadata(None).build_and_execute(&client).await?; + + let on_chain = trail.get().await?; + assert_eq!(on_chain.updatable_metadata, None); + + Ok(()) +} + +#[tokio::test] +async fn update_metadata_does_not_affect_immutable_metadata() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let immutable = ImmutableMetadata::new("Immutable Name".to_string(), Some("frozen".to_string())); + + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("trail-immutable-check-e2e"), None, None)) + .with_trail_metadata(immutable.clone()) + .with_updatable_metadata("mutable") + .finish() + .build_and_execute(&client) + .await? 
+ .output; + + let trail_id = created.trail_id; + client + .create_role(trail_id, "MetadataAdmin", vec![Permission::UpdateMetadata], None) + .await?; + client + .issue_cap(trail_id, "MetadataAdmin", CapabilityIssueOptions::default()) + .await?; + + let trail = client.trail(trail_id); + + trail + .update_metadata(Some("changed".to_string())) + .build_and_execute(&client) + .await?; + + let on_chain = trail.get().await?; + assert_eq!(on_chain.immutable_metadata, Some(immutable)); + assert_eq!(on_chain.updatable_metadata, Some("changed".to_string())); + + Ok(()) +} + +#[tokio::test] +async fn update_metadata_requires_permission() -> anyhow::Result<()> { + let admin = get_funded_test_client().await?; + let metadata_user = get_funded_test_client().await?; + let trail_id = admin.create_test_trail(Data::text("trail-update-meta-denied")).await?; + + admin + .create_role(trail_id, "NoMetadataPerm", vec![Permission::AddRecord], None) + .await?; + admin + .issue_cap( + trail_id, + "NoMetadataPerm", + CapabilityIssueOptions { + issued_to: Some(metadata_user.sender_address()), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + let updated = metadata_user + .trail(trail_id) + .update_metadata(Some("should fail".to_string())) + .build_and_execute(&metadata_user) + .await; + + assert!( + updated.is_err(), + "updating metadata without UpdateMetadata permission must fail" + ); + assert_eq!(admin.trail(trail_id).get().await?.updatable_metadata, None); + + Ok(()) +} + +#[tokio::test] +async fn revoked_capability_cannot_update_metadata() -> anyhow::Result<()> { + let admin = get_funded_test_client().await?; + let metadata_user = get_funded_test_client().await?; + let trail_id = admin.create_test_trail(Data::text("trail-update-meta-revoked")).await?; + + admin + .create_role(trail_id, "MetadataAdmin", vec![Permission::UpdateMetadata], None) + .await?; + let issued = admin + .issue_cap( + trail_id, + "MetadataAdmin", + CapabilityIssueOptions { + issued_to: 
Some(metadata_user.sender_address()), + ..CapabilityIssueOptions::default() + }, + ) + .await?; + + admin + .trail(trail_id) + .access() + .revoke_capability(issued.capability_id, issued.valid_until) + .build_and_execute(&admin) + .await?; + + let updated = metadata_user + .trail(trail_id) + .update_metadata(Some("should fail".to_string())) + .build_and_execute(&metadata_user) + .await; + + assert!(updated.is_err(), "revoked capabilities must not update metadata"); + assert_eq!(admin.trail(trail_id).get().await?.updatable_metadata, None); + + Ok(()) +} + +#[tokio::test] +async fn delete_audit_trail_fails_when_records_exist() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let trail_id = client + .create_test_trail(Data::text("trail-delete-not-empty-e2e")) + .await?; + client + .create_role(trail_id, "TrailDeleteOnly", vec![Permission::DeleteAuditTrail], None) + .await?; + client + .issue_cap(trail_id, "TrailDeleteOnly", CapabilityIssueOptions::default()) + .await?; + let trail = client.trail(trail_id); + + let delete_result = trail.delete_audit_trail().build_and_execute(&client).await; + assert!(delete_result.is_err(), "deleting a non-empty trail must fail"); + + let on_chain = trail.get().await?; + assert_eq!(on_chain.id.object_id(), &trail_id); + + Ok(()) +} + +#[tokio::test] +async fn delete_records_batch_then_delete_audit_trail_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("trail-batch-delete-e2e"), None, None)) + .with_locking_config(config_with_window(LockingWindow::TimeBased { seconds: 3600 })) + .finish() + .build_and_execute(&client) + .await? 
+ .output; + client + .create_role( + created.trail_id, + "TrailDeleteMaintenance", + vec![Permission::DeleteAllRecords, Permission::DeleteAuditTrail], + None, + ) + .await?; + client + .issue_cap( + created.trail_id, + "TrailDeleteMaintenance", + CapabilityIssueOptions::default(), + ) + .await?; + + let trail = client.trail(created.trail_id); + + let deleted = trail + .records() + .delete_records_batch(10) + .build_and_execute(&client) + .await? + .output; + assert_eq!(deleted, 1, "initial record should be deleted in batch"); + assert_eq!(trail.records().record_count().await?, 0); + + let deleted_trail = trail.delete_audit_trail().build_and_execute(&client).await?.output; + assert_eq!(deleted_trail.trail_id, created.trail_id); + assert!(deleted_trail.timestamp > 0); + + let fetch_deleted = trail.get().await; + assert!( + fetch_deleted.is_err(), + "trail object should no longer be readable after delete" + ); + + Ok(()) +} + +#[tokio::test] +async fn manage_record_tag_registry_roundtrip() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("trail-tag-registry"), None, None)) + .with_record_tags(["finance"]) + .finish() + .build_and_execute(&client) + .await? 
+ .output; + + let trail = client.trail(created.trail_id); + let initial = trail.get().await?; + assert_eq!(initial.tags.len(), 1); + assert!(initial.tags.contains_key("finance")); + + trail.tags().add("legal").build_and_execute(&client).await?; + let after_add = trail.get().await?; + assert!(after_add.tags.contains_key("finance")); + assert!(after_add.tags.contains_key("legal")); + + trail.tags().remove("legal").build_and_execute(&client).await?; + + let after_remove = trail.get().await?; + assert_eq!(after_remove.tags.len(), 1); + assert!(after_remove.tags.contains_key("finance")); + + Ok(()) +} + +#[tokio::test] +async fn remove_record_tag_rejects_in_use_tag() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("trail-tag-in-use"), None, None)) + .with_record_tags(["finance"]) + .finish() + .build_and_execute(&client) + .await? + .output; + + client + .create_role( + created.trail_id, + "TaggedWriter", + vec![Permission::AddRecord], + Some(RoleTags::new(["finance"])), + ) + .await?; + client + .issue_cap(created.trail_id, "TaggedWriter", CapabilityIssueOptions::default()) + .await?; + + let trail = client.trail(created.trail_id); + trail + .records() + .add(Data::text("tagged"), None, Some("finance".to_string())) + .build_and_execute(&client) + .await?; + + let removed = trail.tags().remove("finance").build_and_execute(&client).await; + assert!(removed.is_err(), "used record tags must not be removable"); + + Ok(()) +} + +#[tokio::test] +async fn remove_record_tag_rejects_role_only_usage() -> anyhow::Result<()> { + let client = get_funded_test_client().await?; + let created = client + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("trail-tag-role-usage"), None, None)) + .with_record_tags(["finance"]) + .finish() + .build_and_execute(&client) + .await? 
+ .output; + + client + .create_role( + created.trail_id, + "TaggedWriter", + vec![Permission::AddRecord], + Some(RoleTags::new(["finance"])), + ) + .await?; + + let trail = client.trail(created.trail_id); + let removed = trail.tags().remove("finance").build_and_execute(&client).await; + assert!(removed.is_err(), "role-backed tags must not be removable"); + + Ok(()) +} diff --git a/bindings/wasm/audit_trail_wasm/Cargo.toml b/bindings/wasm/audit_trail_wasm/Cargo.toml new file mode 100644 index 00000000..bb509c64 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "audit_trail_wasm" +version = "0.1.0-alpha" +authors = ["IOTA Stiftung"] +edition = "2021" +homepage = "https://www.iota.org" +keywords = ["iota", "tangle", "audit-trail", "wasm"] +license = "Apache-2.0" +publish = false +readme = "README.md" +repository = "https://github.com/iotaledger/notarization.git" +resolver = "2" +description = "Web Assembly bindings for the audit_trail crate." 
+ +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +anyhow = "1.0.95" +audit_trail = { path = "../../../audit-trail-rs", default-features = false, features = ["gas-station", "default-http-client"] } +bcs = "0.1.6" +console_error_panic_hook = { version = "0.1" } +iota_interaction = { git = "https://github.com/iotaledger/product-core.git", branch = "feat/tf-compoenents-dev", package = "iota_interaction", default-features = false } +iota_interaction_ts = { git = "https://github.com/iotaledger/product-core.git", branch = "feat/tf-compoenents-dev", package = "iota_interaction_ts" } +js-sys = { version = "0.3.61" } +product_common = { git = "https://github.com/iotaledger/product-core.git", branch = "feat/tf-compoenents-dev", package = "product_common", features = ["core-client", "transaction", "bindings", "binding-utils", "gas-station", "default-http-client"] } +serde = { version = "1.0", features = ["derive"] } +serde-wasm-bindgen = "0.6.5" +tokio = { version = "1.49.0", default-features = false, features = ["sync"] } +wasm-bindgen = { version = "0.2.100", features = ["serde-serialize"] } +wasm-bindgen-futures = { version = "0.4", default-features = false } + +[target.'cfg(all(target_arch = "wasm32", not(target_os = "wasi")))'.dependencies] +getrandom = { version = "0.3", default-features = false, features = ["wasm_js"] } + +[profile.release] +opt-level = 's' +lto = true + +[lints.clippy] +empty_docs = "allow" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(wasm_bindgen_unstable_test_coverage)'] } diff --git a/bindings/wasm/audit_trail_wasm/README.md b/bindings/wasm/audit_trail_wasm/README.md new file mode 100644 index 00000000..7a9fcf75 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/README.md @@ -0,0 +1,74 @@ +# `audit_trail_wasm` + +`audit_trail_wasm` exposes the `audit_trail` Rust SDK to JavaScript and TypeScript consumers through `wasm-bindgen`. 
+ +It is designed for browser and other `wasm32` environments that need: + +- read-only and signing audit-trail clients +- typed wrappers for trail handles, records, locking, access control, and tags +- serializable value and event types that map cleanly into JS/TS +- transaction wrappers that integrate with the shared `product_common` wasm transaction helpers + +## Main entry points + +- `AuditTrailClientReadOnly` for reads and inspected transactions +- `AuditTrailClient` for signed write flows +- `AuditTrailBuilder` for creating new trails +- `AuditTrailHandle` for trail-scoped APIs +- `TrailRecords`, `TrailLocking`, `TrailAccess`, and `TrailTags` for subsystem-specific operations + +## Choosing an entry point + +- Use `AuditTrailClientReadOnly` when you need reads, package resolution, or inspected transactions. +- Use `AuditTrailClient` when you also need typed write transaction builders. +- Use `AuditTrailHandle` after you already know the trail object ID and want to stay scoped to that trail. +- Use `AuditTrailBuilder` when you are preparing a create-trail transaction. + +## Data model wrappers + +The bindings expose JS-friendly wrappers for the most important Rust value types: + +- `Data` +- `Permission` and `PermissionSet` +- `RoleTags`, `RoleMap`, and `CapabilityIssueOptions` +- `TimeLock`, `LockingWindow`, and `LockingConfig` +- `Record`, `PaginatedRecord`, and `OnChainAuditTrail` +- event payloads such as `RecordAdded`, `RoleCreated`, and `CapabilityIssued` + +## Typical read flow + +1. Create an `AuditTrailClientReadOnly` or `AuditTrailClient`. +2. Resolve a trail handle with `.trail(trailId)`. +3. Read state with `.get()`, `.records().get(...)`, `.records().listPage(...)`, or `.locking().isRecordLocked(...)`. + +## Typical write flow + +1. Create an `AuditTrailClient` with a transaction signer. +2. Build a transaction from `client.createTrail()`, `client.trail(trailId)`, or one of the trail subsystem handles. +3. 
Convert that transaction wrapper into programmable transaction bytes. +4. Submit it through your surrounding JS transaction flow and feed the effects and events back into the typed `applyWithEvents(...)` helper. + +The bindings intentionally separate transaction construction from submission so browser apps, wallet integrations, and server-side signing flows can keep transport and execution policy outside the SDK. + +## Minimal TypeScript shape + +```ts +import { AuditTrailClientReadOnly } from "@iota/audit-trail-wasm"; + +const client = await AuditTrailClientReadOnly.create(iotaClient); +const trail = client.trail(trailId); +const state = await trail.get(); + +console.log(state.sequenceNumber); +``` + +## Build + +```bash +npm install +npm run build +``` + +## Examples + +See [examples/README.md](./examples/README.md) for runnable node and web example flows. diff --git a/bindings/wasm/audit_trail_wasm/cypress.config.ts b/bindings/wasm/audit_trail_wasm/cypress.config.ts new file mode 100644 index 00000000..481c7412 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress.config.ts @@ -0,0 +1,33 @@ +import { defineConfig } from "cypress"; + +export default defineConfig({ + screenshotOnRunFailure: false, + video: false, + requestTimeout: 10000, + defaultCommandTimeout: 60000, + retries: { + runMode: 3, + }, + e2e: { + baseUrl: "http://localhost:5173", + supportFile: false, + setupNodeEvents(on, config) { + on("before:browser:launch", (browser, launchOptions) => { + if (browser.family === "firefox") { + // Fix to make subtle crypto work in cypress firefox + // https://github.com/cypress-io/cypress/issues/18217 + launchOptions.preferences[ + "network.proxy.testing_localhost_is_secure_when_hijacked" + ] = true; + // Temporary fix to allow cypress to control Firefox via CDP + // https://github.com/cypress-io/cypress/issues/29713 + // https://fxdx.dev/deprecating-cdp-support-in-firefox-embracing-the-future-with-webdriver-bidi/ + launchOptions.preferences[ + 
"remote.active-protocols" + ] = 3; + } + return launchOptions; + }); + }, + }, +}); diff --git a/bindings/wasm/audit_trail_wasm/cypress/Dockerfile b/bindings/wasm/audit_trail_wasm/cypress/Dockerfile new file mode 100644 index 00000000..c1b0cd24 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress/Dockerfile @@ -0,0 +1,27 @@ +FROM cypress/browsers:latest + +ARG IOTA_AUDIT_TRAIL_PKG_ID + +ENV IOTA_AUDIT_TRAIL_PKG_ID=$IOTA_AUDIT_TRAIL_PKG_ID + +ARG IOTA_TF_COMPONENTS_PKG_ID + +ENV IOTA_TF_COMPONENTS_PKG_ID=$IOTA_TF_COMPONENTS_PKG_ID + +ARG NETWORK_NAME_FAUCET + +ENV NETWORK_NAME_FAUCET=$NETWORK_NAME_FAUCET + +ARG NETWORK_URL + +ENV NETWORK_URL=$NETWORK_URL + +COPY ./ /e2e + +WORKDIR /e2e/audit_trail_wasm + +RUN npm ci + +RUN npm run build:examples:web + +ENTRYPOINT [ "npm", "run" ] \ No newline at end of file diff --git a/bindings/wasm/audit_trail_wasm/cypress/app/index.html b/bindings/wasm/audit_trail_wasm/cypress/app/index.html new file mode 100644 index 00000000..5d4406c0 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress/app/index.html @@ -0,0 +1,24 @@ + + + + + + + Audit Trail Example App + + +

+ + + \ No newline at end of file diff --git a/bindings/wasm/audit_trail_wasm/cypress/app/package-lock.json b/bindings/wasm/audit_trail_wasm/cypress/app/package-lock.json new file mode 100644 index 00000000..ffaa7de8 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress/app/package-lock.json @@ -0,0 +1,3286 @@ +{ + "name": "vite-project", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "vite-project", + "version": "0.0.0", + "dependencies": { + "@iota/audit-trail": "file:../..", + "@iota/iota-sdk": "^1.0.0" + }, + "devDependencies": { + "typescript": "~5.7.2", + "vite": "^6.2.0", + "vite-plugin-node-polyfills": "^0.24.0" + } + }, + "../..": { + "name": "@iota/audit-trail", + "version": "0.1.0-alpha", + "license": "Apache-2.0", + "dependencies": { + "@iota/iota-interaction-ts": "^0.12.0" + }, + "devDependencies": { + "@types/mocha": "^9.1.0", + "@types/node": "^22.0.0", + "cypress": "^14.2.0", + "dprint": "^0.33.0", + "mocha": "^9.2.0", + "rimraf": "^6.0.1", + "start-server-and-test": "^2.0.11", + "ts-mocha": "^9.0.2", + "ts-node": "^10.9.2", + "tsconfig-paths": "^4.1.0", + "typedoc": "^0.28.5", + "typedoc-plugin-markdown": "^4.4.1", + "typescript": "^5.7.3", + "wasm-opt": "^1.4.0" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@iota/iota-sdk": "^1.11.0" + } + }, + "node_modules/@0no-co/graphql.web": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@0no-co/graphql.web/-/graphql.web-1.2.0.tgz", + "integrity": "sha512-/1iHy9TTr63gE1YcR5idjx8UREz1s0kFhydf3bBLCXyqjhkIc6igAzTOx3zPifCwFR87tsh/4Pa9cNts6d2otw==", + "license": "MIT", + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0" + }, + "peerDependenciesMeta": { + "graphql": { + "optional": true + } + } + }, + "node_modules/@0no-co/graphqlsp": { + "version": "1.15.3", + "resolved": "https://registry.npmjs.org/@0no-co/graphqlsp/-/graphqlsp-1.15.3.tgz", + "integrity": 
"sha512-rap58Wh1qbRnGpPGwB60P6rvKF6G+mgo1kPeDySWIAcqkGMjuyQdrZPcHS6w7mKOT8i/f1UQmjow6+7vfuEXKw==", + "license": "MIT", + "dependencies": { + "@gql.tada/internal": "^1.0.0", + "graphql": "^15.5.0 || ^16.0.0 || ^17.0.0" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0 || ^17.0.0", + "typescript": "^5.0.0 || ^6.0.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", + "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz", + "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz", + "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz", + "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } 
+ }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz", + "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz", + "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz", + "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz", + "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz", + "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": 
"MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz", + "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz", + "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz", + "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz", + "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz", + "integrity": 
"sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz", + "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz", + "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz", + "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz", + "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.12", + "resolved": 
"https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz", + "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz", + "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz", + "integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz", + "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz", + "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": 
{ + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz", + "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz", + "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz", + "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@gql.tada/cli-utils": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/@gql.tada/cli-utils/-/cli-utils-1.7.3.tgz", + "integrity": "sha512-3iQY5E/jvv3Lnh6D1Mh7zr+Bb9C/TGk1DHkm+lbIjQBnZAu2m+BcTcr1e3spUt6Aa6HG/xAN2XxpbWw9oZALEg==", + "license": "MIT", + "dependencies": { + "@0no-co/graphqlsp": "^1.12.13", + "@gql.tada/internal": "1.0.9", + "graphql": "^15.5.0 || ^16.0.0 || ^17.0.0" + }, + "peerDependencies": { + "@0no-co/graphqlsp": "^1.12.13", + "@gql.tada/svelte-support": "1.0.2", + "@gql.tada/vue-support": "1.0.2", + "graphql": "^15.5.0 || ^16.0.0 || ^17.0.0", + "typescript": "^5.0.0 || ^6.0.0" + }, + "peerDependenciesMeta": { + "@gql.tada/svelte-support": { + "optional": true + }, + 
"@gql.tada/vue-support": { + "optional": true + } + } + }, + "node_modules/@gql.tada/internal": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/@gql.tada/internal/-/internal-1.0.9.tgz", + "integrity": "sha512-Bp8yi+kLrzIJ3l5Dfxhz48H4OCH2LCX+pShaPcJgh+oiBt6clrjUKDYNDD3Z78aDQ3+Tyrxe4dd0MfLgpSLPPg==", + "license": "MIT", + "dependencies": { + "@0no-co/graphql.web": "^1.0.5" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0 || ^17.0.0", + "typescript": "^5.0.0 || ^6.0.0" + } + }, + "node_modules/@graphql-typed-document-node/core": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@graphql-typed-document-node/core/-/core-3.2.0.tgz", + "integrity": "sha512-mB9oAsNCm9aM3/SOv4YtBMqZbYj10R7dkq8byBqxGY/ncFwhf2oQzMV+LCRlWoDSEBJ3COiR1yeDvMtsoOsuFQ==", + "license": "MIT", + "peerDependencies": { + "graphql": "^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@iota/audit-trail": { + "resolved": "../..", + "link": true + }, + "node_modules/@iota/bcs": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@iota/bcs/-/bcs-1.5.0.tgz", + "integrity": "sha512-/hv395YtUcRNLY00v7Cl2O+KvVUaUajg4OucZENgSE4Xu1ygUGsLD3dU5FixOUVOn7Abo+n7+KYr9PE/1dsvWg==", + "license": "Apache-2.0", + "dependencies": { + "@scure/base": "^1.2.4" + } + }, + "node_modules/@iota/iota-sdk": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/@iota/iota-sdk/-/iota-sdk-1.11.0.tgz", + "integrity": "sha512-Fveg/4euheaBUzU1ybPyFGe7sSfLFUjLNHhPjNFUmSBOMR+l9q3LU1QdN2sLElcmgJZ+BLxAEmL8TZ0eX3Khpw==", + "license": "Apache-2.0", + "dependencies": { + "@graphql-typed-document-node/core": "^3.2.0", + "@iota/bcs": "1.5.0", + "@noble/curves": "^1.4.2", + "@noble/hashes": "^1.4.0", + "@scure/base": "^1.2.4", + "@scure/bip32": "^1.4.0", + "@scure/bip39": "^1.3.0", + "bignumber.js": "^9.1.1", + "gql.tada": "^1.8.2", + "graphql": "^16.9.0", + "valibot": "^1.2.0" + }, + "engines": { 
+ "node": ">=24" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@noble/curves": { + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@noble/curves/-/curves-1.9.7.tgz", + "integrity": "sha512-gbKGcRUYIjA3/zCCNaWDciTMFI0dCkvou3TL8Zmy5Nc7sJ47a0jtOeZoTaMxkuqRo9cRhjOdZJXegxYE5FN/xw==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "1.8.0" + }, + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@noble/hashes": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz", + "integrity": "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==", + "license": "MIT", + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@rollup/plugin-inject": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/@rollup/plugin-inject/-/plugin-inject-5.0.5.tgz", + "integrity": "sha512-2+DEJbNBoPROPkgTDNe8/1YXWcqxbN5DTjASVIOx8HS+pITXushyNiBV56RB08zuptzz8gT3YfkqriTBVycepg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.0.1", + "estree-walker": "^2.0.2", + "magic-string": "^0.30.3" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rollup/pluginutils": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.3.0.tgz", + "integrity": 
"sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^2.0.2", + "picomatch": "^4.0.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.1.tgz", + "integrity": "sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.1.tgz", + "integrity": "sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.1.tgz", + "integrity": "sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.1.tgz", + "integrity": "sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + 
"optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.1.tgz", + "integrity": "sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.1.tgz", + "integrity": "sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.1.tgz", + "integrity": "sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.1.tgz", + "integrity": "sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.1.tgz", + "integrity": "sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ==", + "cpu": 
[ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.1.tgz", + "integrity": "sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.1.tgz", + "integrity": "sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.1.tgz", + "integrity": "sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.1.tgz", + "integrity": "sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.1.tgz", + "integrity": 
"sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.1.tgz", + "integrity": "sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.1.tgz", + "integrity": "sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.1.tgz", + "integrity": "sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.1.tgz", + "integrity": "sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.60.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.1.tgz", + "integrity": "sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.1.tgz", + "integrity": "sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.1.tgz", + "integrity": "sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.1.tgz", + "integrity": "sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.1.tgz", + "integrity": "sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + 
"node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.1.tgz", + "integrity": "sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.1.tgz", + "integrity": "sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@scure/base": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.2.6.tgz", + "integrity": "sha512-g/nm5FgUa//MCj1gV09zTJTaM6KBAHqLN907YVQqf7zC49+DcO4B1so4ZX07Ef10Twr6nuqYEH9GEggFXA4Fmg==", + "license": "MIT", + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@scure/bip32": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@scure/bip32/-/bip32-1.7.0.tgz", + "integrity": "sha512-E4FFX/N3f4B80AKWp5dP6ow+flD1LQZo/w8UnLGYZO674jS6YnYeepycOOksv+vLPSpgN35wgKgy+ybfTb2SMw==", + "license": "MIT", + "dependencies": { + "@noble/curves": "~1.9.0", + "@noble/hashes": "~1.8.0", + "@scure/base": "~1.2.5" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@scure/bip39": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@scure/bip39/-/bip39-1.6.0.tgz", + "integrity": "sha512-+lF0BbLiJNwVlev4eKelw1WWLaiKXw7sSl8T6FvBlWkdX+94aGJ4o8XjUdlyhTCjd8c+B3KT3JfS8P0bLRNU6A==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "~1.8.0", + "@scure/base": "~1.2.5" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + 
"node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/asn1.js": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz", + "integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/asn1.js/node_modules/bn.js": { + "version": "4.12.3", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.3.tgz", + "integrity": "sha512-fGTi3gxV/23FTYdAoUtLYp6qySe2KE3teyZitipKNRuVYcBkoP/bB3guXN/XVKUe9mxCHXnc9C4ocyz8OmgN0g==", + "dev": true, + "license": "MIT" + }, + "node_modules/assert": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/assert/-/assert-2.1.0.tgz", + "integrity": "sha512-eLHpSK/Y4nhMJ07gDaAzoX/XAKS8PSaojml3M0DM4JpV1LAi5JOJ/p6H/XWrl8L+DzVEvVCW1z3vWAaB9oTsQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "is-nan": "^1.3.2", + "object-is": "^1.1.5", + "object.assign": "^4.1.4", + "util": "^0.12.5" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + 
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/bignumber.js": { + "version": "9.3.1", + "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", + "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/bn.js": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.3.tgz", + "integrity": "sha512-EAcmnPkxpntVL+DS7bO1zhcZNvCkxqtkd0ZY53h06GNQ3DEkkGZ/gKgmDv6DdZQGj9BgfSPKtJJ7Dp1GPP8f7w==", + "dev": true, + "license": "MIT" + }, + "node_modules/brorand": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", + "integrity": "sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==", + "dev": true, + "license": "MIT" + }, + "node_modules/browser-resolve": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/browser-resolve/-/browser-resolve-2.0.0.tgz", + "integrity": "sha512-7sWsQlYL2rGLy2IWm8WL8DCTJvYLc/qlOnsakDac87SOoCd16WLsaAMdCiAqsTNHIe+SXfaqyxyo6THoWqs8WQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve": "^1.17.0" + } + }, + "node_modules/browserify-aes": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", + "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-xor": "^1.0.3", + "cipher-base": "^1.0.0", + "create-hash": "^1.1.0", + "evp_bytestokey": 
"^1.0.3", + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/browserify-cipher": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", + "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "browserify-aes": "^1.0.4", + "browserify-des": "^1.0.0", + "evp_bytestokey": "^1.0.0" + } + }, + "node_modules/browserify-des": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", + "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "cipher-base": "^1.0.1", + "des.js": "^1.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/browserify-rsa": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.1.tgz", + "integrity": "sha512-YBjSAiTqM04ZVei6sXighu679a3SqWORA3qZTEqZImnlkDIFtKc6pNutpjyZ8RJTjQtuYfeetkxM11GwoYXMIQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^5.2.1", + "randombytes": "^2.1.0", + "safe-buffer": "^5.2.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/browserify-sign": { + "version": "4.2.5", + "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.5.tgz", + "integrity": "sha512-C2AUdAJg6rlM2W5QMp2Q4KGQMVBwR1lIimTsUnutJ8bMpW5B52pGpR2gEnNBNwijumDo5FojQ0L9JrXA8m4YEw==", + "dev": true, + "license": "ISC", + "dependencies": { + "bn.js": "^5.2.2", + "browserify-rsa": "^4.1.1", + "create-hash": "^1.2.0", + "create-hmac": "^1.1.7", + "elliptic": "^6.6.1", + "inherits": "^2.0.4", + "parse-asn1": "^5.1.9", + "readable-stream": "^2.3.8", + "safe-buffer": "^5.2.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/browserify-sign/node_modules/isarray": { 
+ "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/browserify-sign/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/browserify-sign/node_modules/readable-stream/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/browserify-sign/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/browserify-sign/node_modules/string_decoder/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/browserify-zlib": { + "version": "0.2.0", + "resolved": 
"https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", + "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pako": "~1.0.5" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/buffer-xor": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", + "integrity": "sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/builtin-status-codes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", + "integrity": "sha512-HpGFw18DgFWlncDfjTa2rcQ4W88O1mC8e8yZ2AvQY5KDaktSTwo+KRf6nHK6FRI5FyRyb/5T6+TSxfP7QyGsmQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/cipher-base": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.7.tgz", + "integrity": "sha512-Mz9QMT5fJe7bKI7MH31UilT5cEK5EHHRCccw/YRFsRY47AuNgaV6HY3rscp0/I4Q+tTW/5zoqpSeRRI54TkDWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.4", + "safe-buffer": "^5.2.1", + "to-buffer": "^1.2.2" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/console-browserify": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", + "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==", + "dev": true + }, + "node_modules/constants-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", + "integrity": "sha512-xFxOwqIzR/e1k1gLiWEophSCMqXcwVHIH7akf7b/vxcUeGunlj3hvZaaqxwHsTgn+IndtkQJgSztIDWeumWJDQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/core-util-is": { + "version": 
"1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/create-ecdh": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz", + "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.1.0", + "elliptic": "^6.5.3" + } + }, + "node_modules/create-ecdh/node_modules/bn.js": { + "version": "4.12.3", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.3.tgz", + "integrity": "sha512-fGTi3gxV/23FTYdAoUtLYp6qySe2KE3teyZitipKNRuVYcBkoP/bB3guXN/XVKUe9mxCHXnc9C4ocyz8OmgN0g==", + "dev": true, + "license": "MIT" + }, + "node_modules/create-hash": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", + "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cipher-base": "^1.0.1", + "inherits": "^2.0.1", + "md5.js": "^1.3.4", + "ripemd160": "^2.0.1", + "sha.js": "^2.4.0" + } + }, + "node_modules/create-hmac": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", + "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cipher-base": "^1.0.3", + "create-hash": "^1.1.0", + "inherits": "^2.0.1", + "ripemd160": "^2.0.0", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": 
"sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/crypto-browserify": { + "version": "3.12.1", + "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.1.tgz", + "integrity": "sha512-r4ESw/IlusD17lgQi1O20Fa3qNnsckR126TdUuBgAu7GBYSIPvdNyONd3Zrxh0xCwA4+6w/TDArBPsMvhur+KQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "browserify-cipher": "^1.0.1", + "browserify-sign": "^4.2.3", + "create-ecdh": "^4.0.4", + "create-hash": "^1.2.0", + "create-hmac": "^1.1.7", + "diffie-hellman": "^5.0.3", + "hash-base": "~3.0.4", + "inherits": "^2.0.4", + "pbkdf2": "^3.1.2", + "public-encrypt": "^4.0.3", + "randombytes": "^2.1.0", + "randomfill": "^1.0.4" + }, + "engines": { + "node": ">= 0.10" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/des.js": { + 
"version": "1.1.0", + "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.1.0.tgz", + "integrity": "sha512-r17GxjhUCjSRy8aiJpr8/UadFIzMzJGexI3Nmz4ADi9LYSFx4gTBp80+NaX/YsXWWLhpZ7v/v/ubEc/bCNfKwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/diffie-hellman": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", + "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.1.0", + "miller-rabin": "^4.0.0", + "randombytes": "^2.0.0" + } + }, + "node_modules/diffie-hellman/node_modules/bn.js": { + "version": "4.12.3", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.3.tgz", + "integrity": "sha512-fGTi3gxV/23FTYdAoUtLYp6qySe2KE3teyZitipKNRuVYcBkoP/bB3guXN/XVKUe9mxCHXnc9C4ocyz8OmgN0g==", + "dev": true, + "license": "MIT" + }, + "node_modules/domain-browser": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-4.22.0.tgz", + "integrity": "sha512-IGBwjF7tNk3cwypFNH/7bfzBcgSCbaMOD3GsaY1AU/JRrnHnYgEM0+9kQt52iZxjNsjBtJYtao146V+f8jFZNw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/elliptic": { + "version": "6.6.1", + "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.6.1.tgz", + "integrity": 
"sha512-RaddvvMatK2LJHqFJ+YA4WysVN5Ita9E35botqIYspQ4TkRAlCicdzKOjlyv/1Za5RyTNn7di//eEV0uTAfe3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.11.9", + "brorand": "^1.1.0", + "hash.js": "^1.0.0", + "hmac-drbg": "^1.0.1", + "inherits": "^2.0.4", + "minimalistic-assert": "^1.0.1", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "node_modules/elliptic/node_modules/bn.js": { + "version": "4.12.3", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.3.tgz", + "integrity": "sha512-fGTi3gxV/23FTYdAoUtLYp6qySe2KE3teyZitipKNRuVYcBkoP/bB3guXN/XVKUe9mxCHXnc9C4ocyz8OmgN0g==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz", + "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==", + "dev": true, + "hasInstallScript": true, + "license": 
"MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.12", + "@esbuild/android-arm": "0.25.12", + "@esbuild/android-arm64": "0.25.12", + "@esbuild/android-x64": "0.25.12", + "@esbuild/darwin-arm64": "0.25.12", + "@esbuild/darwin-x64": "0.25.12", + "@esbuild/freebsd-arm64": "0.25.12", + "@esbuild/freebsd-x64": "0.25.12", + "@esbuild/linux-arm": "0.25.12", + "@esbuild/linux-arm64": "0.25.12", + "@esbuild/linux-ia32": "0.25.12", + "@esbuild/linux-loong64": "0.25.12", + "@esbuild/linux-mips64el": "0.25.12", + "@esbuild/linux-ppc64": "0.25.12", + "@esbuild/linux-riscv64": "0.25.12", + "@esbuild/linux-s390x": "0.25.12", + "@esbuild/linux-x64": "0.25.12", + "@esbuild/netbsd-arm64": "0.25.12", + "@esbuild/netbsd-x64": "0.25.12", + "@esbuild/openbsd-arm64": "0.25.12", + "@esbuild/openbsd-x64": "0.25.12", + "@esbuild/openharmony-arm64": "0.25.12", + "@esbuild/sunos-x64": "0.25.12", + "@esbuild/win32-arm64": "0.25.12", + "@esbuild/win32-ia32": "0.25.12", + "@esbuild/win32-x64": "0.25.12" + } + }, + "node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "dev": true, + "license": "MIT" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/evp_bytestokey": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", + "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "md5.js": "^1.3.4", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/for-each": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": 
"sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/generator-function": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz", + "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/gql.tada": { + "version": "1.9.2", + "resolved": "https://registry.npmjs.org/gql.tada/-/gql.tada-1.9.2.tgz", + "integrity": "sha512-QxRHVpxtrOVdYXz6oavq0lBM+Zdp0swapLGJcD4SLpXDcsD337BHDFrzqqjfkbepv0sSAiO0LGabu1kI5D5Gyg==", + "license": "MIT", + "dependencies": { + "@0no-co/graphql.web": "^1.0.5", + "@0no-co/graphqlsp": "^1.12.13", + "@gql.tada/cli-utils": "1.7.3", + "@gql.tada/internal": "1.0.9" + }, + "bin": { + "gql-tada": "bin/cli.js", + "gql.tada": "bin/cli.js" + }, + "peerDependencies": { + "typescript": "^5.0.0 || ^6.0.0" + } + }, + "node_modules/graphql": { + "version": "16.13.2", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.13.2.tgz", + "integrity": "sha512-5bJ+nf/UCpAjHM8i06fl7eLyVC9iuNAjm9qzkiu2ZGhM0VscSvS6WDPfAwkdkBuoXGM9FJSbKl6wylMwP9Ktig==", + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": 
"sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hash-base": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.0.5.tgz", + "integrity": "sha512-vXm0l45VbcHEVlTCzs8M+s0VeYsB2lnlAaThoLKGXr3bE/VWDOelNUnycUPEhKEaXARL2TEFjBOyUiM6+55KBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.4", + "safe-buffer": "^5.2.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/hash.js": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", + "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "minimalistic-assert": "^1.0.1" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hmac-drbg": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", + "integrity": "sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "hash.js": "^1.0.3", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "node_modules/https-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", + "integrity": 
"sha512-J+FkSdyD+0mA0N+81tMotaRMfSL9SGi+xpD3T6YApKsc3bGSXJlfXri3VyFOeYkfLRQisDk1W+jIFFKBeUBbBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-arguments": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.2.0.tgz", + "integrity": "sha512-7bVbi0huj/wrIAOzb8U1aszg9kdi3KN/CyU19CTI7tAoZYEZoL9yCDXpbXN+uPsuWnP02cyug1gleqq+TU+YCA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": 
"sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-generator-function": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.2.tgz", + "integrity": "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.4", + "generator-function": "^2.0.0", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-nan": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/is-nan/-/is-nan-1.3.2.tgz", + "integrity": "sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + 
"integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true, + "license": "MIT" + }, + "node_modules/isomorphic-timers-promises": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/isomorphic-timers-promises/-/isomorphic-timers-promises-1.0.1.tgz", + "integrity": "sha512-u4sej9B1LPSxTGKB/HiuzvEQnXH0ECYkSVQU39koSwmFAxhlEAFl9RdTvLv4TOTQUgBS5O3O5fwUxk6byBZ+IQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": 
"sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/md5.js": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", + "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", + "dev": true, + "license": "MIT", + "dependencies": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/miller-rabin": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", + "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.0.0", + "brorand": "^1.0.1" + }, + "bin": { + "miller-rabin": "bin/miller-rabin" + } + }, + "node_modules/miller-rabin/node_modules/bn.js": { + "version": "4.12.3", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.3.tgz", + "integrity": "sha512-fGTi3gxV/23FTYdAoUtLYp6qySe2KE3teyZitipKNRuVYcBkoP/bB3guXN/XVKUe9mxCHXnc9C4ocyz8OmgN0g==", + "dev": true, + "license": "MIT" + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", + "dev": true, + "license": "ISC" + }, + "node_modules/minimalistic-crypto-utils": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", + "integrity": "sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": 
"https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-stdlib-browser": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/node-stdlib-browser/-/node-stdlib-browser-1.3.1.tgz", + "integrity": "sha512-X75ZN8DCLftGM5iKwoYLA3rjnrAEs97MkzvSd4q2746Tgpg8b8XWiBGiBG4ZpgcAqBgtgPHTiAc8ZMCvZuikDw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assert": "^2.0.0", + "browser-resolve": "^2.0.0", + "browserify-zlib": "^0.2.0", + "buffer": "^5.7.1", + "console-browserify": "^1.1.0", + "constants-browserify": "^1.0.0", + "create-require": "^1.1.1", + "crypto-browserify": "^3.12.1", + "domain-browser": "4.22.0", + "events": "^3.0.0", + "https-browserify": "^1.0.0", + "isomorphic-timers-promises": "^1.0.1", + "os-browserify": "^0.3.0", + "path-browserify": "^1.0.1", + "pkg-dir": "^5.0.0", + "process": "^0.11.10", + "punycode": "^1.4.1", + "querystring-es3": "^0.2.1", + "readable-stream": "^3.6.0", + "stream-browserify": "^3.0.0", + "stream-http": "^3.2.0", + "string_decoder": "^1.0.0", + "timers-browserify": "^2.0.4", + "tty-browserify": "0.0.1", + "url": "^0.11.4", + "util": "^0.12.4", + "vm-browserify": "^1.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/object-is": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.6.tgz", + "integrity": "sha512-F8cZ+KfGlSGi09lJT7/Nd6KJZ9ygtvYC0/UYYLI9nmQKLMnydpB9yvbv9K1uSkEu7FU9vYPmVwLg328tX+ot3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", + "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/os-browserify": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", + "integrity": "sha512-gjcpUc3clBf9+210TRaDWbf+rZZZEshZ+DlXMRCeAjp0xhTrnQsKHypIy1J3d5hKdUzj69t708EHtU8P6bUn0A==", + "dev": true, + "license": "MIT" + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + 
"engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pako": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", + "dev": true, + "license": "(MIT AND Zlib)" + }, + "node_modules/parse-asn1": { + "version": "5.1.9", + "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.9.tgz", + "integrity": "sha512-fIYNuZ/HastSb80baGOuPRo1O9cf4baWw5WsAp7dBuUzeTD/BoaG8sVTdlPFksBE2lF21dN+A1AnrpIjSWqHHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "asn1.js": "^4.10.1", + "browserify-aes": "^1.2.0", + "evp_bytestokey": "^1.0.3", + "pbkdf2": "^3.1.5", + "safe-buffer": "^5.2.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/path-browserify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", + "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": 
"https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/pbkdf2": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.5.tgz", + "integrity": "sha512-Q3CG/cYvCO1ye4QKkuH7EXxs3VC/rI1/trd+qX2+PolbaKG0H+bgcZzrTt96mMyRtejk+JMCiLUn3y29W8qmFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "create-hash": "^1.2.0", + "create-hmac": "^1.1.7", + "ripemd160": "^2.0.3", + "safe-buffer": "^5.2.1", + "sha.js": "^2.4.12", + "to-buffer": "^1.2.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-5.0.0.tgz", + "integrity": "sha512-NPE8TDbzl/3YQYY7CSS228s3g2ollTFnc+Qi3tqmqJp9Vg2ovUpixcJEo2HJScN2Ez+kEaal6y70c0ehqJBJeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^5.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": 
"sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.5.9", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.9.tgz", + "integrity": "sha512-7a70Nsot+EMX9fFU3064K/kdHWZqGVY+BADLyXc8Dfv+mTLLVl6JzJpPaCZ2kQL9gIJvKXSLMHhqdRRjwQeFtw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true, + "license": "MIT" + }, + "node_modules/public-encrypt": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", + "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "bn.js": "^4.1.0", + "browserify-rsa": "^4.0.0", + "create-hash": "^1.1.0", + "parse-asn1": "^5.0.0", + "randombytes": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + 
"node_modules/public-encrypt/node_modules/bn.js": { + "version": "4.12.3", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.3.tgz", + "integrity": "sha512-fGTi3gxV/23FTYdAoUtLYp6qySe2KE3teyZitipKNRuVYcBkoP/bB3guXN/XVKUe9mxCHXnc9C4ocyz8OmgN0g==", + "dev": true, + "license": "MIT" + }, + "node_modules/punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/qs": { + "version": "6.15.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.15.0.tgz", + "integrity": "sha512-mAZTtNCeetKMH+pSjrb76NAM8V9a05I9aBZOHztWy/UqcJdQYNsf59vrRKWnojAT9Y+GbIvoTBC++CPHqpDBhQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/querystring-es3": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", + "integrity": "sha512-773xhDQnZBMFobEiztv8LIl70ch5MSF/jUQVlhwFyBILqq96anmoctVIYz+ZRp0qbCKATTn6ev02M3r7Ga5vqA==", + "dev": true, + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/randomfill": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", + "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", + "dev": true, + "license": "MIT", + "dependencies": { + "randombytes": "^2.0.5", + 
"safe-buffer": "^5.1.0" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ripemd160": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.3.tgz", + "integrity": "sha512-5Di9UC0+8h1L6ZD2d7awM7E/T4uA1fJRlx6zk/NvdCCVEoAnFqvHmCuNeIKoCeIixBX/q8uM+6ycDvF8woqosA==", + "dev": true, + "license": "MIT", + "dependencies": { + "hash-base": "^3.1.2", + "inherits": "^2.0.4" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/ripemd160/node_modules/hash-base": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.2.tgz", + "integrity": "sha512-Bb33KbowVTIj5s7Ked1OsqHUeCpz//tPwR+E2zJgJKo9Z5XolZ9b6bdUgjmYlwnWhoOQKoTd1TYToZGn5mAYOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.4", + "readable-stream": "^2.3.8", + "safe-buffer": "^5.2.1", + "to-buffer": "^1.2.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/ripemd160/node_modules/isarray": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/ripemd160/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/ripemd160/node_modules/readable-stream/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/ripemd160/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/ripemd160/node_modules/string_decoder/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/rollup": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.1.tgz", + "integrity": 
"sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.60.1", + "@rollup/rollup-android-arm64": "4.60.1", + "@rollup/rollup-darwin-arm64": "4.60.1", + "@rollup/rollup-darwin-x64": "4.60.1", + "@rollup/rollup-freebsd-arm64": "4.60.1", + "@rollup/rollup-freebsd-x64": "4.60.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.60.1", + "@rollup/rollup-linux-arm-musleabihf": "4.60.1", + "@rollup/rollup-linux-arm64-gnu": "4.60.1", + "@rollup/rollup-linux-arm64-musl": "4.60.1", + "@rollup/rollup-linux-loong64-gnu": "4.60.1", + "@rollup/rollup-linux-loong64-musl": "4.60.1", + "@rollup/rollup-linux-ppc64-gnu": "4.60.1", + "@rollup/rollup-linux-ppc64-musl": "4.60.1", + "@rollup/rollup-linux-riscv64-gnu": "4.60.1", + "@rollup/rollup-linux-riscv64-musl": "4.60.1", + "@rollup/rollup-linux-s390x-gnu": "4.60.1", + "@rollup/rollup-linux-x64-gnu": "4.60.1", + "@rollup/rollup-linux-x64-musl": "4.60.1", + "@rollup/rollup-openbsd-x64": "4.60.1", + "@rollup/rollup-openharmony-arm64": "4.60.1", + "@rollup/rollup-win32-arm64-msvc": "4.60.1", + "@rollup/rollup-win32-ia32-msvc": "4.60.1", + "@rollup/rollup-win32-x64-gnu": "4.60.1", + "@rollup/rollup-win32-x64-msvc": "4.60.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + 
], + "license": "MIT" + }, + "node_modules/safe-regex-test": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/setimmediate": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", + "integrity": "sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==", + "dev": true, + "license": "MIT" + }, + "node_modules/sha.js": { + "version": "2.4.12", + "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.12.tgz", + "integrity": "sha512-8LzC5+bvI45BjpfXU8V5fdU2mfeKiQe1D1gIMn7XUlF3OTUrpdJpPPH4EMAnF0DsHHdSZqCdSss5qCmJKuiO3w==", + "dev": true, + "license": "(MIT AND BSD-3-Clause)", + "dependencies": { + "inherits": "^2.0.4", + "safe-buffer": "^5.2.1", + "to-buffer": "^1.2.0" + }, + "bin": { + "sha.js": "bin.js" + }, + "engines": { + "node": ">= 0.10" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + 
"side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stream-browserify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-3.0.0.tgz", + "integrity": "sha512-H73RAHsVBapbim0tU2JwwOiXUj+fikfiaoYAKHF3VJfA0pe2BCzkhAHBlLG6REzE+2WNZcxOXjK7lkso+9euLA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "~2.0.4", + "readable-stream": "^3.5.0" + } + }, + "node_modules/stream-http": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-3.2.0.tgz", + "integrity": "sha512-Oq1bLqisTyK3TSCXpPbT4sdeYNdmyZJv1LxpEm2vu1ZhK89kSE5YXwZc3cWk0MagGaKriBh9mCFbVGtO+vY29A==", + "dev": true, + "license": "MIT", + "dependencies": { + "builtin-status-codes": "^3.0.0", + "inherits": "^2.0.4", + "readable-stream": "^3.6.0", + "xtend": "^4.0.2" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/timers-browserify": { + "version": "2.0.12", + "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz", + "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "setimmediate": "^1.0.4" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.16", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.16.tgz", + "integrity": "sha512-pn99VhoACYR8nFHhxqix+uvsbXineAasWm5ojXoN8xEwK5Kd3/TrhNn1wByuD52UxWRLy8pu+kRMniEi6Eq9Zg==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.4" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/to-buffer": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.2.2.tgz", + "integrity": "sha512-db0E3UJjcFhpDhAF4tLo03oli3pwl3dbnzXOUIlRKrp+ldk/VUxzpWYZENsw2SZiuBjHAk7DfB0VU7NKdpb6sw==", + "dev": true, + "license": "MIT", + "dependencies": { + "isarray": "^2.0.5", + "safe-buffer": "^5.2.1", + "typed-array-buffer": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/tty-browserify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.1.tgz", + "integrity": "sha512-C3TaO7K81YvjCgQH9Q1S3R3P3BtN3RIM8n+OvX4il1K1zgE8ZhI0op7kClgkxtutIE8hQrcrHBXvIheqKUUCxw==", + "dev": true, + "license": "MIT" + }, + "node_modules/typed-array-buffer": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz", + "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typescript": { + "version": "5.7.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.3.tgz", + "integrity": "sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==", + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/url": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/url/-/url-0.11.4.tgz", + "integrity": "sha512-oCwdVC7mTuWiPyjLUz/COz5TLk6wgp0RCsN+wHZ2Ekneac9w8uuV0njcbbie2ME+Vs+d6duwmYuR3HgQXs1fOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^1.4.1", + "qs": "^6.12.3" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/util": { + "version": "0.12.5", + "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz", + "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/valibot": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/valibot/-/valibot-1.3.1.tgz", + "integrity": "sha512-sfdRir/QFM0JaF22hqTroPc5xy4DimuGQVKFrzF1YfGwaS1nJot3Y8VqMdLO2Lg27fMzat2yD3pY5PbAYO39Gg==", + "license": "MIT", + "peerDependencies": { + "typescript": ">=5" + }, + "peerDependenciesMeta": { + "typescript": { + 
"optional": true + } + } + }, + "node_modules/vite": { + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.2.tgz", + "integrity": "sha512-2N/55r4JDJ4gdrCvGgINMy+HH3iRpNIz8K6SFwVsA+JbQScLiC+clmAxBgwiSPgcG9U15QmvqCGWzMbqda5zGQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.4.4", + "picomatch": "^4.0.2", + "postcss": "^8.5.3", + "rollup": "^4.34.9", + "tinyglobby": "^0.2.13" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vite-plugin-node-polyfills": { + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/vite-plugin-node-polyfills/-/vite-plugin-node-polyfills-0.24.0.tgz", + "integrity": "sha512-GA9QKLH+vIM8NPaGA+o2t8PDfFUl32J8rUp1zQfMKVJQiNkOX4unE51tR6ppl6iKw5yOrDAdSH7r/UIFLCVhLw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/plugin-inject": "^5.0.5", + "node-stdlib-browser": "^1.2.0" + }, + "funding": { + "url": "https://github.com/sponsors/davidmyersdev" + }, + "peerDependencies": { + "vite": "^2.0.0 || ^3.0.0 || ^4.0.0 || 
^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/vm-browserify": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", + "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/which-typed-array": { + "version": "1.1.20", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.20.tgz", + "integrity": "sha512-LYfpUkmqwl0h9A2HL09Mms427Q1RZWuOHsukfVcKRq9q95iQxdw0ix1JQrqbcDR9PH1QDwf5Qo8OZb5lksZ8Xg==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/bindings/wasm/audit_trail_wasm/cypress/app/package.json b/bindings/wasm/audit_trail_wasm/cypress/app/package.json new file mode 100644 index 00000000..71ed8f81 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress/app/package.json @@ -0,0 +1,20 @@ +{ + "name": "vite-project", + "private": true, + "version": "0.0.0", 
+ "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview" + }, + "devDependencies": { + "typescript": "~5.7.2", + "vite": "^6.2.0", + "vite-plugin-node-polyfills": "^0.24.0" + }, + "dependencies": { + "@iota/iota-sdk": "^1.0.0", + "@iota/audit-trail": "file:../.." + } +} \ No newline at end of file diff --git a/bindings/wasm/audit_trail_wasm/cypress/app/src/audit_trail.ts b/bindings/wasm/audit_trail_wasm/cypress/app/src/audit_trail.ts new file mode 100644 index 00000000..d5bbf1f7 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress/app/src/audit_trail.ts @@ -0,0 +1,18 @@ +import url from "@iota/audit-trail/web/audit_trail_wasm_bg.wasm?url"; + +import { init } from "@iota/audit-trail/web"; +import { main } from "../../../examples/dist/web/web-main"; + +export const runTest = async (example: string) => { + try { + await main(example); + console.log("success"); + } catch (error) { + throw error; + } +}; + +init(url) + .then(() => { + console.log("init"); + }); diff --git a/bindings/wasm/audit_trail_wasm/cypress/app/src/main.ts b/bindings/wasm/audit_trail_wasm/cypress/app/src/main.ts new file mode 100644 index 00000000..e8945451 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress/app/src/main.ts @@ -0,0 +1,3 @@ +import { runTest } from "./audit_trail"; + +globalThis.runTest = runTest; diff --git a/bindings/wasm/audit_trail_wasm/cypress/app/src/vite-env.d.ts b/bindings/wasm/audit_trail_wasm/cypress/app/src/vite-env.d.ts new file mode 100644 index 00000000..01691ca6 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress/app/src/vite-env.d.ts @@ -0,0 +1,12 @@ +/// +declare const IOTA_AUDIT_TRAIL_PKG_ID: string; +declare const IOTA_TF_COMPONENTS_PKG_ID: string; +declare const NETWORK_NAME_FAUCET: string; +declare const ENV_NETWORK_URL: string; +declare const runTest: (example: string) => Promise; + +declare global { + var runTest: (example: string) => Promise; +} + +export {}; diff --git 
a/bindings/wasm/audit_trail_wasm/cypress/app/tsconfig.json b/bindings/wasm/audit_trail_wasm/cypress/app/tsconfig.json new file mode 100644 index 00000000..9469f855 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress/app/tsconfig.json @@ -0,0 +1,27 @@ +{ + "extends": "../../tsconfig.json", + "compilerOptions": { + "useDefineForClassFields": true, + "module": "ESNext", + "lib": [ + "ES2020", + "DOM", + "DOM.Iterable" + ], + "skipLibCheck": true, + "allowJs": true, + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "isolatedModules": true, + "moduleDetection": "force", + "noEmit": true, + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedSideEffectImports": true + }, + "include": [ + "src" + ] +} \ No newline at end of file diff --git a/bindings/wasm/audit_trail_wasm/cypress/app/vite.config.js b/bindings/wasm/audit_trail_wasm/cypress/app/vite.config.js new file mode 100644 index 00000000..305644fb --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress/app/vite.config.js @@ -0,0 +1,38 @@ +import { defineConfig } from "vite"; +import { nodePolyfills } from "vite-plugin-node-polyfills"; +export default defineConfig(({ command, mode }) => { + // variables will be set during build time + const EXPOSED_ENVS = [ + "IOTA_AUDIT_TRAIL_PKG_ID", + "IOTA_TF_COMPONENTS_PKG_ID", + "NETWORK_NAME_FAUCET", + "NETWORK_URL", + ]; + + return { + plugins: [ + nodePolyfills({ + include: ["assert"], + }), + ], + define: EXPOSED_ENVS.reduce((prev, env_var) => { + const var_value = globalThis?.process?.env?.[env_var]; + if (var_value) { + console.log("exposing", env_var, var_value); + prev[`process.env.${env_var}`] = JSON.stringify(var_value); + } + return prev; + }, {}), + server: { + // open on default port or fail to make CI consistent + strictPort: true, + }, + build: { + rollupOptions: { + output: { + interop: "auto", + }, + }, + }, + }; +}); diff --git 
a/bindings/wasm/audit_trail_wasm/cypress/e2e/tests.cy.js b/bindings/wasm/audit_trail_wasm/cypress/e2e/tests.cy.js new file mode 100644 index 00000000..960ab311 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/cypress/e2e/tests.cy.js @@ -0,0 +1,35 @@ +const { _ } = Cypress; + +describe( + "Test Examples", + () => { + const examples = [ + "01_create_audit_trail", + "02_add_and_read_records", + "03_update_metadata", + "04_configure_locking", + "05_manage_access", + "06_delete_records", + "07_access_read_only_methods", + "08_delete_audit_trail", + "09_tagged_records", + "10_capability_constraints", + "11_manage_record_tags", + "01_customs_clearance", + "02_clinical_trial", + ]; + + _.each(examples, (example) => { + it(example, () => { + cy.visit("/", { + onBeforeLoad(win) { + cy.stub(win.console, "log").as("consoleLog"); + }, + }); + cy.get("@consoleLog").should("be.calledWith", "init"); + cy.window({ timeout: 180000 }).then({ timeout: 180000 }, (win) => win.runTest(example)); + cy.get("@consoleLog", { timeout: 180000 }).should("be.calledWith", "success"); + }); + }); + }, +); diff --git a/bindings/wasm/audit_trail_wasm/docs/wasm/api_ref.md b/bindings/wasm/audit_trail_wasm/docs/wasm/api_ref.md new file mode 100644 index 00000000..95a4cd62 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/docs/wasm/api_ref.md @@ -0,0 +1,9 @@ +**@iota/audit-trail API documentation** + +--- + +# @iota/audit-trail API documentation + +## Modules + +- [audit\_trail\_wasm](audit_trail_wasm/api_ref.md) diff --git a/bindings/wasm/audit_trail_wasm/docs/wasm/audit_trails_wasm/api_ref.md b/bindings/wasm/audit_trail_wasm/docs/wasm/audit_trails_wasm/api_ref.md new file mode 100644 index 00000000..e9c5b2e1 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/docs/wasm/audit_trails_wasm/api_ref.md @@ -0,0 +1,9 @@ +[**@iota/audit-trail API documentation**](../api_ref.md) + +--- + +# audit\_trail\_wasm + +## Classes + +- [DefaultHttpClient](classes/DefaultHttpClient.md) diff --git 
a/bindings/wasm/audit_trail_wasm/docs/wasm/audit_trails_wasm/classes/DefaultHttpClient.md b/bindings/wasm/audit_trail_wasm/docs/wasm/audit_trails_wasm/classes/DefaultHttpClient.md new file mode 100644 index 00000000..d3d974e5 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/docs/wasm/audit_trails_wasm/classes/DefaultHttpClient.md @@ -0,0 +1,7 @@ +[**@iota/audit-trail API documentation**](../../api_ref.md) + +--- + +# Class: DefaultHttpClient + +A default implementation for HttpClient. diff --git a/bindings/wasm/audit_trail_wasm/examples/README.md b/bindings/wasm/audit_trail_wasm/examples/README.md new file mode 100644 index 00000000..65aec6c9 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/README.md @@ -0,0 +1,71 @@ +# IOTA Audit Trail WASM Examples + +The examples in this folder demonstrate how to use the `@iota/audit-trail` package. + +## Environment + +Set the following environment variables before running the node examples: + +| Name | Required | Description | +| --------------------------- | ------------------- | ----------------------------------------------------- | +| `IOTA_AUDIT_TRAIL_PKG_ID` | yes | Package ID of the deployed `audit_trail` Move package | +| `IOTA_TF_COMPONENTS_PKG_ID` | local/custom setups | Package ID of the deployed `TfComponents` package | +| `NETWORK_URL` | yes | RPC URL of the IOTA node | +| `NETWORK_NAME_FAUCET` | local/test networks | Faucet alias used by `@iota/iota-sdk` | + +## Run + +Install dependencies and build the package: + +```bash +npm install +npm run build +``` + +Run an example: + +```bash +IOTA_AUDIT_TRAIL_PKG_ID= \ +IOTA_TF_COMPONENTS_PKG_ID= \ +NETWORK_URL=http://127.0.0.1:9000 \ +npm run example:node -- 01_create_audit_trail +``` + +### Localnet + +On localnet the publish script emits the required `export` statements directly. 
Use `eval` to set both variables in one step (run from the `audit_trail_wasm/` directory): + +```bash +eval $(../../../audit-trail-move/scripts/publish_package.sh) +npm run example:node -- 01_create_audit_trail +``` + +Available examples: + +### Core + +| Name | Description | +| ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| `01_create_audit_trail` | Creates an audit trail, defines a RecordAdmin role, and issues a capability for it | +| `02_add_and_read_records` | Adds follow-up records, reads them individually and through paginated reads | +| `03_update_metadata` | Updates and clears mutable metadata while preserving immutable metadata via a MetadataAdmin role | +| `04_configure_locking` | Configures write and delete locks, demonstrates that locks block record creation | +| `05_manage_access` | Creates and updates a role, then demonstrates constrained capability issuance, revoke and destroy flows, denylist cleanup, and final role removal | +| `06_delete_records` | Deletes individual records and batch-deletes remaining records | +| `07_access_read_only_methods` | Reads trail metadata, record counts, pagination, and lock status | +| `08_delete_audit_trail` | Shows that non-empty trails cannot be deleted, batch-deletes records, then deletes the trail | + +### Advanced + +| Name | Description | +| --------------------------- | -------------------------------------------------------------------------------------- | +| `09_tagged_records` | Uses role tags and address-bound capabilities to restrict who may add tagged records | +| `10_capability_constraints` | Shows address-bound capability use and how revocation immediately blocks future writes | +| `11_manage_record_tags` | Delegates tag management, adds/removes tags, shows that in-use tags cannot be removed | + +### Real-World + +| Name | Description | +| ---------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------- | +| `01_customs_clearance` | Models customs clearance with role-tag restrictions, delegated capabilities, denied inspection writes, and a final write lock | +| `02_clinical_trial` | Models a clinical trial with time-constrained capabilities, mid-study tag addition, deletion windows, time-locks, and regulator verification | diff --git a/bindings/wasm/audit_trail_wasm/examples/src/01_create_audit_trail.ts b/bindings/wasm/audit_trail_wasm/examples/src/01_create_audit_trail.ts new file mode 100644 index 00000000..2ce12cf4 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/01_create_audit_trail.ts @@ -0,0 +1,59 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates the trail and holds the built-in Admin capability that is + * automatically minted on creation. + * - **RecordAdmin**: Receives a RecordAdmin capability bound to their address. Writes + * records in subsequent examples. + * + * Demonstrates how to: + * 1. Create an audit trail with immutable metadata, updatable metadata, and a seed record. + * 2. Inspect the built-in Admin role. + * 3. Define a RecordAdmin role and issue a capability for it. + */ + +import { CapabilityIssueOptions, PermissionSet } from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { createTrailWithSeedRecord, getFundedClient, TEST_GAS_BUDGET } from "./util"; + +export async function createAuditTrail(): Promise { + console.log("Creating an audit trail"); + + // `admin` creates the trail and holds the Admin capability. + // `recordAdmin` receives the RecordAdmin capability. 
+ const admin = await getFundedClient(); + const recordAdmin = await getFundedClient(); + + console.log("Admin address: ", admin.senderAddress()); + console.log("RecordAdmin address: ", recordAdmin.senderAddress()); + + const { output: trail, response } = await createTrailWithSeedRecord(admin); + + console.log(`Created trail ${trail.id} with transaction ${response.digest}`); + console.log("Immutable metadata:", trail.immutableMetadata); + console.log("Updatable metadata:", trail.updatableMetadata); + console.log("Locking config:", trail.lockingConfig); + + assert.equal(trail.sequenceNumber, 1n); + assert.ok(trail.immutableMetadata); + assert.equal(trail.immutableMetadata?.name, "Example Audit Trail"); + + // Define a RecordAdmin role and issue the capability to recordAdmin's address. + const role = admin.trail(trail.id).access().forRole("RecordAdmin"); + await role + .create(PermissionSet.recordAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await role + .issueCapability(new CapabilityIssueOptions(recordAdmin.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const onChain = await admin.trail(trail.id).get(); + const roleNames = onChain.roles.roles.map((r) => r.name); + console.log("Roles:", roleNames); + assert.ok(roleNames.includes("RecordAdmin")); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/02_add_and_read_records.ts b/bindings/wasm/audit_trail_wasm/examples/src/02_add_and_read_records.ts new file mode 100644 index 00000000..161f788e --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/02_add_and_read_records.ts @@ -0,0 +1,74 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates the trail, defines the RecordAdmin role, and issues a capability. + * - **RecordAdmin**: Holds the capability and writes records. 
Reads are also done through + * this client to demonstrate that any address can read, but only the cap holder can write. + * + * Demonstrates how to: + * 1. Add follow-up records to a trail. + * 2. Read them back individually by sequence number. + * 3. Paginate through records. + */ + +import { CapabilityIssueOptions, Data, PermissionSet } from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { createTrailWithSeedRecord, getFundedClient, TEST_GAS_BUDGET } from "./util"; + +export async function addAndReadRecords(): Promise { + console.log("Adding records and reading them back with pagination"); + + // `admin` creates the trail and sets up the role. + // `recordAdmin` holds the capability and writes/reads records. + const admin = await getFundedClient(); + const recordAdmin = await getFundedClient(); + + const { output: trail } = await createTrailWithSeedRecord(admin); + const trailId = trail.id; + + // Create a RecordAdmin role and issue the capability to recordAdmin's address. + const role = admin.trail(trailId).access().forRole("RecordAdmin"); + await role + .create(PermissionSet.recordAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await role + .issueCapability(new CapabilityIssueOptions(recordAdmin.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + // The client automatically finds the capability in recordAdmin's wallet. 
+ const records = recordAdmin.trail(trailId).records(); + + // Add records + const addedSecond = await records + .add(Data.fromString("record 2"), "second") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(recordAdmin); + const addedThird = await records + .add(Data.fromString("record 3"), "third") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(recordAdmin); + + console.log("Added records:", addedSecond.output, addedThird.output); + + // Read individual records + const initial = await records.get(0n); + const first = await records.get(addedSecond.output.sequenceNumber); + assert.equal(initial.data.toString(), "seed record"); + assert.equal(first.data.toString(), "record 2"); + + // Paginate + const firstPage = await records.listPage(undefined, 2); + const secondPage = await records.listPage(firstPage.nextCursor, 2); + + console.log("First page:", firstPage); + console.log("Second page:", secondPage); + + assert.equal(firstPage.records.length, 2); + assert.equal(firstPage.hasNextPage, true); + assert.equal(secondPage.records.length, 1); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/03_update_metadata.ts b/bindings/wasm/audit_trail_wasm/examples/src/03_update_metadata.ts new file mode 100644 index 00000000..7b3e69b0 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/03_update_metadata.ts @@ -0,0 +1,86 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates the trail and sets up the MetadataAdmin role. + * - **MetadataAdmin**: Holds the MetadataAdmin capability and updates the trail's mutable + * status field. Has no record-write permissions. + * + * Demonstrates how to: + * 1. Create a trail with immutable and updatable metadata. + * 2. Delegate metadata updates through a dedicated MetadataAdmin role. + * 3. Change and clear the trail's updatable metadata. + * 4. Verify that immutable metadata never changes. 
+ */ + +import { CapabilityIssueOptions, PermissionSet } from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { getFundedClient, TEST_GAS_BUDGET } from "./util"; + +export async function updateMetadata(): Promise { + console.log("=== Audit Trail: Update Metadata ===\n"); + + // `admin` creates the trail and sets up the role. + // `metadataAdmin` holds the MetadataAdmin capability and updates the status. + const admin = await getFundedClient(); + const metadataAdmin = await getFundedClient(); + + const { output: trail } = await admin + .createTrail() + .withTrailMetadata("Shipment Processing", "Tracks the lifecycle of a warehouse shipment") + .withUpdatableMetadata("Status: Draft") + .withInitialRecordString("Shipment created", "event:created") + .finish() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const trailId = trail.id; + + // Delegate metadata updates to a MetadataAdmin role. + const role = admin.trail(trailId).access().forRole("MetadataAdmin"); + await role + .create(PermissionSet.metadataAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await role + .issueCapability(new CapabilityIssueOptions(metadataAdmin.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const before = await admin.trail(trailId).get(); + console.log("Before update:"); + console.log(" immutable =", before.immutableMetadata); + console.log(" updatable =", before.updatableMetadata, "\n"); + + // MetadataAdmin updates the mutable metadata. 
+ await metadataAdmin + .trail(trailId) + .updateMetadata("Status: In Review") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(metadataAdmin); + + const afterUpdate = await admin.trail(trailId).get(); + console.log("After update:"); + console.log(" immutable =", afterUpdate.immutableMetadata); + console.log(" updatable =", afterUpdate.updatableMetadata, "\n"); + + assert.equal(afterUpdate.immutableMetadata?.name, "Shipment Processing"); + assert.equal(afterUpdate.updatableMetadata, "Status: In Review"); + + // MetadataAdmin clears the mutable metadata. + await metadataAdmin + .trail(trailId) + .updateMetadata(undefined) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(metadataAdmin); + + const afterClear = await admin.trail(trailId).get(); + console.log("After clear:"); + console.log(" immutable =", afterClear.immutableMetadata); + console.log(" updatable =", afterClear.updatableMetadata); + + assert.equal(afterClear.immutableMetadata?.name, "Shipment Processing"); + assert.equal(afterClear.updatableMetadata, undefined); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/04_configure_locking.ts b/bindings/wasm/audit_trail_wasm/examples/src/04_configure_locking.ts new file mode 100644 index 00000000..7b282f36 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/04_configure_locking.ts @@ -0,0 +1,124 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates the trail and sets up the LockingAdmin and RecordAdmin roles. + * - **LockingAdmin**: Controls write and delete locks. Holds the LockingAdmin capability. + * - **RecordAdmin**: Writes records. Used to demonstrate that the write lock is enforced + * per-sender, not just checked by the admin. + * + * Demonstrates how to: + * 1. Delegate locking updates through a LockingAdmin role. + * 2. Freeze record creation with a write lock. + * 3. Restore writes and add a new record. + * 4. 
Update the delete-record window and delete-trail lock. + */ + +import { + CapabilityIssueOptions, + Data, + LockingConfig, + LockingWindow, + PermissionSet, + TimeLock, +} from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { createTrailWithSeedRecord, getFundedClient, TEST_GAS_BUDGET } from "./util"; + +export async function configureLocking(): Promise { + console.log("=== Audit Trail: Configure Locking ===\n"); + + // `admin` creates the trail and sets up roles. + // `lockingAdmin` controls locks; `recordAdmin` writes records. + const admin = await getFundedClient(); + const lockingAdmin = await getFundedClient(); + const recordAdmin = await getFundedClient(); + + const { output: trail } = await createTrailWithSeedRecord(admin); + const trailId = trail.id; + + // Create LockingAdmin and RecordAdmin roles. + const lockingRole = admin.trail(trailId).access().forRole("LockingAdmin"); + await lockingRole + .create(PermissionSet.lockingAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await lockingRole + .issueCapability(new CapabilityIssueOptions(lockingAdmin.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const recordRole = admin.trail(trailId).access().forRole("RecordAdmin"); + await recordRole + .create(PermissionSet.recordAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await recordRole + .issueCapability(new CapabilityIssueOptions(recordAdmin.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + // LockingAdmin freezes writes. 
+ await lockingAdmin + .trail(trailId) + .locking() + .updateWriteLock(TimeLock.withInfinite()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(lockingAdmin); + + const locked = await admin.trail(trailId).get(); + console.log("Write lock after update:", locked.lockingConfig.writeLock, "\n"); + assert.equal(locked.lockingConfig.writeLock.type, TimeLock.withInfinite().type); + + // RecordAdmin attempts to add a record while locked — should fail. + const blockedAdd = await recordAdmin + .trail(trailId) + .records() + .add(Data.fromString("This write should fail"), "blocked") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(recordAdmin) + .catch(() => null); + assert.equal(blockedAdd, null, "write lock should block adding records"); + + // LockingAdmin lifts the write lock. + await lockingAdmin + .trail(trailId) + .locking() + .updateWriteLock(TimeLock.withNone()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(lockingAdmin); + + const added = await recordAdmin + .trail(trailId) + .records() + .add(Data.fromString("Write lock lifted"), "event:resumed") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(recordAdmin); + console.log("Added record", added.output.sequenceNumber, "after clearing the write lock.\n"); + + // LockingAdmin configures deletion window and trail lock. 
+ await lockingAdmin + .trail(trailId) + .locking() + .updateDeleteRecordWindow(LockingWindow.withCountBased(BigInt(2))) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(lockingAdmin); + await lockingAdmin + .trail(trailId) + .locking() + .updateDeleteTrailLock(TimeLock.withInfinite()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(lockingAdmin); + + const finalState = await admin.trail(trailId).get(); + console.log("Final locking config:"); + console.log(" delete_record_window =", finalState.lockingConfig.deleteRecordWindow); + console.log(" delete_trail_lock =", finalState.lockingConfig.deleteTrailLock); + console.log(" write_lock =", finalState.lockingConfig.writeLock); + + assert.equal(finalState.lockingConfig.deleteRecordWindow.type, LockingWindow.withCountBased(BigInt(2)).type); + assert.equal(finalState.lockingConfig.deleteTrailLock.type, TimeLock.withInfinite().type); + assert.equal(finalState.lockingConfig.writeLock.type, TimeLock.withNone().type); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/05_manage_access.ts b/bindings/wasm/audit_trail_wasm/examples/src/05_manage_access.ts new file mode 100644 index 00000000..3fd1a0f0 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/05_manage_access.ts @@ -0,0 +1,133 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates and updates roles, issues capabilities, revokes and destroys them, + * and finally deletes the role once it is no longer needed. + * - **OperationsUser**: The subject of all capability issuance. Capabilities are bound to + * this address to demonstrate that revocation immediately blocks their access. + * + * Demonstrates how to: + * 1. Create and update a custom role. + * 2. Issue a constrained capability for that role. + * 3. Revoke one capability and destroy another. + * 4. Remove the role after its capabilities are no longer needed. 
+ */ + +import { CapabilityIssueOptions, Permission, PermissionSet } from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { createTrailWithSeedRecord, getFundedClient, TEST_GAS_BUDGET } from "./util"; + +export async function manageAccess(): Promise { + console.log("=== Audit Trail: Manage Access ===\n"); + + // `admin` manages roles and the full capability lifecycle. + // `operationsUser` is the target of all capability issuance. + const admin = await getFundedClient(); + const operationsUser = await getFundedClient(); + + const { output: trail } = await createTrailWithSeedRecord(admin); + const trailId = trail.id; + + // 1. Create the role + const createdRole = await admin + .trail(trailId) + .access() + .forRole("Operations") + .create(PermissionSet.recordAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + console.log("Created role:", createdRole.output.role, "\n"); + + // 2. Update the role permissions + const updatedPermissionValues = [ + Permission.AddRecord, + Permission.DeleteRecord, + Permission.DeleteAllRecords, + ]; + const updatedPermissions = new PermissionSet(updatedPermissionValues); + const updatedRole = await admin + .trail(trailId) + .access() + .forRole("Operations") + .updatePermissions(updatedPermissions) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + console.log("Updated role permissions:", updatedRole.output.permissions.permissions.map((p) => p.toString())); + + // 3. Issue a constrained capability bound to operationsUser's address. 
+ const constrainedCap = await admin + .trail(trailId) + .access() + .forRole("Operations") + .issueCapability( + new CapabilityIssueOptions(operationsUser.senderAddress(), undefined, BigInt(4_102_444_800_000)), + ) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + console.log("\nIssued constrained capability:"); + console.log(" id =", constrainedCap.output.capabilityId); + console.log(" issued_to =", constrainedCap.output.issuedTo); + console.log(" valid_until =", constrainedCap.output.validUntil, "\n"); + + // Verify the on-chain role matches the updated permissions. + const onChain = await admin.trail(trailId).get(); + const opsRole = onChain.roles.roles.find((r) => r.name === "Operations"); + assert.ok(opsRole, "Operations role must exist"); + const opsPermSet = new Set(opsRole?.permissions.map((p) => p.toString())); + for (const perm of updatedPermissionValues) { + assert(opsPermSet.has(perm.toString()), `role should contain ${perm}`); + } + + // 4. Revoke the constrained capability. + await admin + .trail(trailId) + .access() + .revokeCapability(constrainedCap.output.capabilityId, constrainedCap.output.validUntil) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + console.log("Revoked capability", constrainedCap.output.capabilityId, "\n"); + + // 5. Issue a disposable capability (to admin) and destroy it. + // destroyCapability consumes the capability object, so the signer must own it. + // The capability is issued to admin so admin can destroy it directly. + const disposableCap = await admin + .trail(trailId) + .access() + .forRole("Operations") + .issueCapability(new CapabilityIssueOptions(admin.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await admin + .trail(trailId) + .access() + .destroyCapability(disposableCap.output.capabilityId) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + console.log("Destroyed capability", disposableCap.output.capabilityId, "\n"); + + // 6. 
Clean up the revoked-capability registry entry so the role can be removed. + await admin + .trail(trailId) + .access() + .cleanupRevokedCapabilities() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + console.log("Cleaned up revoked capability registry entries.\n"); + + // 7. Delete the role. + await admin + .trail(trailId) + .access() + .forRole("Operations") + .delete() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + const afterDelete = await admin.trail(trailId).get(); + const opsRoleAfterDelete = afterDelete.roles.roles.find((r) => r.name === "Operations"); + assert.equal(opsRoleAfterDelete, undefined, "role should be removed from the trail"); + + console.log("Removed the custom role after its capability lifecycle completed."); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/06_delete_records.ts b/bindings/wasm/audit_trail_wasm/examples/src/06_delete_records.ts new file mode 100644 index 00000000..556e6631 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/06_delete_records.ts @@ -0,0 +1,97 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates the trail and sets up the RecordMaintenance role. + * - **RecordMaintainer**: Holds the RecordMaintenance capability. Adds records and then + * deletes them individually and in batch. + * + * Demonstrates how to: + * 1. Create records via a delegated RecordMaintenance role. + * 2. Delete a single record by sequence number. + * 3. Batch-delete remaining records. + */ + +import { + CapabilityIssueOptions, + Data, + LockingConfig, + LockingWindow, + Permission, + PermissionSet, + TimeLock, +} from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { getFundedClient, TEST_GAS_BUDGET } from "./util"; + +export async function deleteRecords(): Promise { + console.log("=== Audit Trail: Delete Records ===\n"); + + // `admin` creates the trail and sets up the role. 
+ // `recordMaintainer` adds and deletes records. + const admin = await getFundedClient(); + const recordMaintainer = await getFundedClient(); + + const { output: trail } = await admin + .createTrail() + .withTrailMetadata("Delete Records Example", "Trail configured to demonstrate record deletions") + .withUpdatableMetadata("Status: Active") + .withLockingConfig( + new LockingConfig(LockingWindow.withNone(), TimeLock.withNone(), TimeLock.withNone()), + ) + .withInitialRecordString("Seed record", "v0") + .finish() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const trailId = trail.id; + + // Create a role with delete permissions and issue to recordMaintainer. + const role = admin.trail(trailId).access().forRole("RecordMaintenance"); + await role + .create(new PermissionSet([Permission.AddRecord, Permission.DeleteRecord, Permission.DeleteAllRecords])) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await role + .issueCapability(new CapabilityIssueOptions(recordMaintainer.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const records = recordMaintainer.trail(trailId).records(); + + // RecordMaintainer adds records. + const rec1 = await records + .add(Data.fromString("First record"), "v1") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(recordMaintainer); + const rec2 = await records + .add(Data.fromString("Second record"), "v2") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(recordMaintainer); + + console.log("Added records", rec1.output.sequenceNumber, "and", rec2.output.sequenceNumber); + + // Delete a single record. 
+ const deleted = await records + .delete(rec1.output.sequenceNumber) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(recordMaintainer); + console.log("Deleted record", deleted.output.sequenceNumber); + + let count = await records.recordCount(); + console.log("Record count after single delete:", count); + assert.equal(count, 2n); // seed + rec2 + + // Batch-delete remaining records. + const batchDeleted = await records + .deleteBatch(BigInt(10)) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(recordMaintainer); + console.log("Batch deleted", batchDeleted.output, "records"); + + count = await records.recordCount(); + assert.equal(count, 0n, "all records should be deleted after batch"); + console.log("Record count after batch delete:", count); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/07_access_read_only_methods.ts b/bindings/wasm/audit_trail_wasm/examples/src/07_access_read_only_methods.ts new file mode 100644 index 00000000..8086199f --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/07_access_read_only_methods.ts @@ -0,0 +1,97 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates the trail and sets up the RecordAdmin role. + * - **RecordAdmin**: Adds one follow-up record. All subsequent operations are read-only + * and can be performed by any address — no capability required. + * + * Demonstrates how to: + * 1. Load the full on-chain trail object. + * 2. Inspect metadata, roles, and locking configuration. + * 3. Read records individually and through pagination. + * 4. Query the record-count and lock-status helpers. 
+ */ + +import { + CapabilityIssueOptions, + Data, + LockingConfig, + LockingWindow, + PermissionSet, + TimeLock, +} from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { getFundedClient, TEST_GAS_BUDGET } from "./util"; + +export async function accessReadOnlyMethods(): Promise { + console.log("=== Audit Trail: Read-Only Inspection ===\n"); + + // `admin` creates the trail and sets up the role. + // `recordAdmin` adds the follow-up record. + const admin = await getFundedClient(); + const recordAdmin = await getFundedClient(); + + const { output: created } = await admin + .createTrail() + .withTrailMetadata("Operations Trail", "Used to inspect read-only accessors") + .withUpdatableMetadata("Status: Active") + .withLockingConfig( + new LockingConfig(LockingWindow.withCountBased(BigInt(2)), TimeLock.withNone(), TimeLock.withNone()), + ) + .withInitialRecordString("Initial record", "event:created") + .finish() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const trailId = created.id; + + // Create RecordAdmin role and issue to recordAdmin. + const role = admin.trail(trailId).access().forRole("RecordAdmin"); + await role + .create(PermissionSet.recordAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await role + .issueCapability(new CapabilityIssueOptions(recordAdmin.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + // RecordAdmin adds a follow-up record. + await recordAdmin + .trail(trailId) + .records() + .add(Data.fromString("Follow-up record"), "event:updated") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(recordAdmin); + + // All reads below require no capability — any address can inspect the trail. 
+ const onChain = await admin.trail(trailId).get(); + console.log("Trail summary:"); + console.log(" id =", onChain.id); + console.log(" creator =", onChain.creator); + console.log(" created_at =", onChain.createdAt); + console.log(" sequence_number =", onChain.sequenceNumber); + console.log(" immutable_metadata =", onChain.immutableMetadata); + console.log(" updatable_metadata =", onChain.updatableMetadata, "\n"); + + console.log("Roles:", onChain.roles.roles.map((r) => r.name)); + console.log("Locking config:", onChain.lockingConfig, "\n"); + + const trailHandle = admin.trail(trailId); + const count = await trailHandle.records().recordCount(); + const initialRecord = await trailHandle.records().get(0n); + const firstPage = await trailHandle.records().listPage(undefined, 10); + const recordZeroLocked = await trailHandle.locking().isRecordLocked(0n); + + console.log("Record count:", count); + console.log("Record #0:", initialRecord); + console.log("First page size:", firstPage.records.length, "(has_next_page =", firstPage.hasNextPage, ")"); + console.log("Is record #0 locked?", recordZeroLocked); + + assert.equal(count, 2n); + assert.equal(initialRecord.data.toString(), "Initial record"); + assert.equal(firstPage.records.length, 2); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/08_delete_audit_trail.ts b/bindings/wasm/audit_trail_wasm/examples/src/08_delete_audit_trail.ts new file mode 100644 index 00000000..2e594d09 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/08_delete_audit_trail.ts @@ -0,0 +1,93 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates the trail and sets up the MaintenanceAdmin role. + * - **MaintenanceAdmin**: Holds delete permissions. Attempts (and fails) to delete the + * non-empty trail, then batch-deletes all records before removing the trail itself. + * + * Demonstrates how to: + * 1. Show that a non-empty trail cannot be deleted. + * 2. 
Empty the trail with deleteBatch. + * 3. Delete the trail once its records are gone. + */ + +import { CapabilityIssueOptions, Data, Permission, PermissionSet } from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { getFundedClient, TEST_GAS_BUDGET } from "./util"; + +export async function deleteAuditTrail(): Promise { + console.log("=== Audit Trail: Delete Trail ===\n"); + + // `admin` creates the trail and sets up the role. + // `maintenanceAdmin` empties and deletes the trail. + const admin = await getFundedClient(); + const maintenanceAdmin = await getFundedClient(); + + const { output: created } = await admin + .createTrail() + .withInitialRecordString("Initial record", "event:created") + .finish() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const trailId = created.id; + + // Create a role with delete permissions and issue to maintenanceAdmin. + const role = admin.trail(trailId).access().forRole("MaintenanceAdmin"); + await role + .create(new PermissionSet([Permission.DeleteAllRecords, Permission.DeleteAuditTrail])) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await role + .issueCapability(new CapabilityIssueOptions(maintenanceAdmin.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const maintenanceTrail = maintenanceAdmin.trail(trailId); + + // 1. Attempting to delete a non-empty trail should fail. + let deleteWhileNonEmptySucceeded = false; + try { + await maintenanceTrail + .deleteAuditTrail() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(maintenanceAdmin); + deleteWhileNonEmptySucceeded = true; + } catch { + // Expected + } + assert.equal(deleteWhileNonEmptySucceeded, false, "a trail must be empty before deletion"); + console.log("Deleting the non-empty trail failed as expected.\n"); + + // 2. Batch-delete all records. 
+ const deletedRecords = await maintenanceTrail + .records() + .deleteBatch(BigInt(10)) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(maintenanceAdmin); + console.log("Deleted", deletedRecords.output, "record(s) before trail removal.\n"); + + const count = await maintenanceTrail.records().recordCount(); + assert.equal(count, 0n, "trail should have no records after batch delete"); + + // 3. Delete the now-empty trail. + const deletedTrail = await maintenanceTrail + .deleteAuditTrail() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(maintenanceAdmin); + console.log("Trail deleted:"); + console.log(" trail_id =", deletedTrail.output.trailId); + console.log(" timestamp =", deletedTrail.output.timestamp); + + let getAfterDeleteSucceeded = false; + try { + await maintenanceTrail.get(); + getAfterDeleteSucceeded = true; + } catch { + // Expected + } + assert.equal(getAfterDeleteSucceeded, false, "deleted trail should no longer be readable"); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/advanced/09_tagged_records.ts b/bindings/wasm/audit_trail_wasm/examples/src/advanced/09_tagged_records.ts new file mode 100644 index 00000000..57b8e87a --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/advanced/09_tagged_records.ts @@ -0,0 +1,86 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates the trail, defines the FinanceWriter role restricted to the + * `finance` tag, and issues a capability bound to `financeWriter`'s address. + * - **FinanceWriter**: Holds the address-bound capability. Can add `finance`-tagged + * records but is blocked from writing `legal`-tagged records. + * + * Demonstrates how to: + * 1. Create a trail with a predefined tag registry. + * 2. Define a role that is restricted to one record tag. + * 3. Issue a capability bound to a specific wallet address. + * 4. Show that the holder can add only records matching the allowed tag. 
+ */ + +import { CapabilityIssueOptions, Data, Permission, PermissionSet, RoleTags } from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { getFundedClient, TEST_GAS_BUDGET } from "../util"; + +export async function taggedRecords(): Promise { + console.log("=== Audit Trail Advanced: Tagged Records ===\n"); + + const admin = await getFundedClient(); + const financeWriter = await getFundedClient(); + + const { output: created } = await admin + .createTrail() + .withRecordTags(["finance", "legal"]) + .withInitialRecordString("Trail created", "event:created") + .finish() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const trailId = created.id; + + // Create a role restricted to the "finance" tag. + const role = admin.trail(trailId).access().forRole("FinanceWriter"); + await role + .create(new PermissionSet([Permission.AddRecord]), new RoleTags(["finance"])) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const issued = await role + .issueCapability(new CapabilityIssueOptions(financeWriter.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + console.log( + "Issued FinanceWriter capability", + issued.output.capabilityId, + "to", + financeWriter.senderAddress(), + "\n", + ); + + // The client automatically finds the capability in financeWriter's wallet. + const financeRecords = financeWriter.trail(trailId).records(); + + // Add a record with the allowed tag. + const added = await financeRecords + .add(Data.fromString("Invoice approved"), "department:finance", "finance") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(financeWriter); + + console.log("Added tagged record at sequence number", added.output.sequenceNumber, "with tag \"finance\".\n"); + + // Attempt to add a record with a different tag — should fail. 
+ let wrongTagSucceeded = false; + try { + await financeRecords + .add(Data.fromString("Legal review completed"), "department:legal", "legal") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(financeWriter); + wrongTagSucceeded = true; + } catch { + // Expected + } + assert.equal(wrongTagSucceeded, false, "a finance-scoped role must not add a legal-tagged record"); + + const financeRecord = await financeRecords.get(added.output.sequenceNumber); + console.log("Stored tagged record:", financeRecord); + assert.equal(financeRecord.tag, "finance"); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/advanced/10_capability_constraints.ts b/bindings/wasm/audit_trail_wasm/examples/src/advanced/10_capability_constraints.ts new file mode 100644 index 00000000..4f59c46f --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/advanced/10_capability_constraints.ts @@ -0,0 +1,103 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates the trail, defines the RecordAdmin role, and issues a capability + * bound specifically to `intendedWriter`'s address. Also performs revocation. + * - **IntendedWriter**: The authorised holder. Writes a record successfully before + * revocation, then is blocked after the capability is revoked. + * - **WrongWriter**: An unauthorised actor who attempts to use the address-bound capability. + * All write attempts are rejected by the Move contract. + * + * Demonstrates how to: + * 1. Bind a capability to a specific wallet address. + * 2. Show that a different wallet cannot use it. + * 3. Revoke the capability and confirm the bound holder can no longer use it. 
+ */ + +import { CapabilityIssueOptions, Data, PermissionSet } from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { createTrailWithSeedRecord, getFundedClient, TEST_GAS_BUDGET } from "../util"; + +export async function capabilityConstraints(): Promise { + console.log("=== Audit Trail Advanced: Capability Constraints ===\n"); + + const admin = await getFundedClient(); + const intendedWriter = await getFundedClient(); + const wrongWriter = await getFundedClient(); + + const { output: created } = await createTrailWithSeedRecord(admin); + const trailId = created.id; + + // Create a RecordAdmin role. + await admin + .trail(trailId) + .access() + .forRole("RecordAdmin") + .create(PermissionSet.recordAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + // Issue a capability bound to the intended writer's address. + const issued = await admin + .trail(trailId) + .access() + .forRole("RecordAdmin") + .issueCapability(new CapabilityIssueOptions(intendedWriter.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + console.log("Issued capability", issued.output.capabilityId, "to", intendedWriter.senderAddress(), "\n"); + + // The wrong wallet should not be able to add a record. + let wrongWriterSucceeded = false; + try { + await wrongWriter + .trail(trailId) + .records() + .add(Data.fromString("Wrong writer"), undefined, undefined) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(wrongWriter); + wrongWriterSucceeded = true; + } catch { + // Expected + } + assert.equal(wrongWriterSucceeded, false, "a capability bound to another address must not be usable"); + + // The intended writer CAN add a record. 
+ const added = await intendedWriter + .trail(trailId) + .records() + .add(Data.fromString("Authorized writer"), undefined, undefined) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(intendedWriter); + + console.log("Bound holder added record", added.output.sequenceNumber, "successfully.\n"); + + // Revoke the capability. + await admin + .trail(trailId) + .access() + .revokeCapability(issued.output.capabilityId, issued.output.validUntil) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + // The intended writer should no longer be able to add a record. + let revokedSucceeded = false; + try { + await intendedWriter + .trail(trailId) + .records() + .add(Data.fromString("Should fail after revoke"), undefined, undefined) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(intendedWriter); + revokedSucceeded = true; + } catch { + // Expected + } + assert.equal(revokedSucceeded, false, "revoked capabilities must no longer authorize record writes"); + + console.log("Revoked capability", issued.output.capabilityId, "and verified it can no longer be used."); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/advanced/11_manage_record_tags.ts b/bindings/wasm/audit_trail_wasm/examples/src/advanced/11_manage_record_tags.ts new file mode 100644 index 00000000..3b1f6b78 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/advanced/11_manage_record_tags.ts @@ -0,0 +1,104 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * ## Actors + * + * - **Admin**: Creates the trail and manages roles. + * - **TagAdmin**: Holds the TagAdmin capability. Adds and removes entries from the trail's + * tag registry. + * - **FinanceWriter**: Holds a `finance`-scoped RecordAdmin capability. Writes a + * `finance`-tagged record that keeps the `finance` tag in use and therefore unremovable. + * + * Demonstrates how to: + * 1. Delegate record-tag registry management to a TagAdmin role. + * 2. 
Add and remove tags from the trail registry. + * 3. Show that tags still in use by roles or records cannot be removed. + */ + +import { CapabilityIssueOptions, Data, PermissionSet, RoleTags } from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { getFundedClient, TEST_GAS_BUDGET } from "../util"; + +export async function manageRecordTags(): Promise { + console.log("=== Audit Trail Advanced: Manage Record Tags ===\n"); + + // `admin` creates the trail and manages roles. + // `tagAdmin` adds/removes tags; `financeWriter` writes tagged records. + const admin = await getFundedClient(); + const tagAdmin = await getFundedClient(); + const financeWriter = await getFundedClient(); + + const { output: created } = await admin + .createTrail() + .withRecordTags(["finance"]) + .withInitialRecordString("Trail created") + .finish() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const trailId = created.id; + + // Delegate tag management to a TagAdmin role. + const tagAdminRole = admin.trail(trailId).access().forRole("TagAdmin"); + await tagAdminRole + .create(PermissionSet.tagAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await tagAdminRole + .issueCapability(new CapabilityIssueOptions(tagAdmin.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + // TagAdmin adds a new tag. + await tagAdmin.trail(trailId).tags().add("legal").withGasBudget(TEST_GAS_BUDGET).buildAndExecute(tagAdmin); + + let onChain = await admin.trail(trailId).get(); + console.log("Registry after adding \"legal\":", onChain.tags.map((t) => t.tag), "\n"); + assert.ok(onChain.tags.some((t) => t.tag === "finance")); + assert.ok(onChain.tags.some((t) => t.tag === "legal")); + + // Create a role scoped to "finance" tag and issue to financeWriter. 
+ await admin + .trail(trailId) + .access() + .forRole("FinanceWriter") + .create(PermissionSet.recordAdminPermissions(), new RoleTags(["finance"])) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await admin + .trail(trailId) + .access() + .forRole("FinanceWriter") + .issueCapability(new CapabilityIssueOptions(financeWriter.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + // FinanceWriter adds a record using the "finance" tag. + await financeWriter + .trail(trailId) + .records() + .add(Data.fromString("Tagged finance entry"), undefined, "finance") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(financeWriter); + + // TagAdmin attempts to remove "finance" tag — should fail because it's in use. + let removeFinanceSucceeded = false; + try { + await tagAdmin.trail(trailId).tags().remove("finance").withGasBudget(TEST_GAS_BUDGET).buildAndExecute(tagAdmin); + removeFinanceSucceeded = true; + } catch { + // Expected + } + assert.equal(removeFinanceSucceeded, false, "a tag referenced by a role or record must not be removable"); + + // TagAdmin removes "legal" tag — should succeed because nothing uses it. 
+ await tagAdmin.trail(trailId).tags().remove("legal").withGasBudget(TEST_GAS_BUDGET).buildAndExecute(tagAdmin); + + onChain = await admin.trail(trailId).get(); + console.log("Registry after removing \"legal\":", onChain.tags.map((t) => t.tag), "\n"); + assert.ok(onChain.tags.some((t) => t.tag === "finance"), "finance tag should still exist"); + assert.ok(!onChain.tags.some((t) => t.tag === "legal"), "legal tag should be removed"); + + console.log("Tag management completed successfully."); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/main.ts b/bindings/wasm/audit_trail_wasm/examples/src/main.ts new file mode 100644 index 00000000..db36f304 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/main.ts @@ -0,0 +1,58 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +import { createAuditTrail } from "./01_create_audit_trail"; +import { addAndReadRecords } from "./02_add_and_read_records"; +import { updateMetadata } from "./03_update_metadata"; +import { configureLocking } from "./04_configure_locking"; +import { manageAccess } from "./05_manage_access"; +import { deleteRecords } from "./06_delete_records"; +import { accessReadOnlyMethods } from "./07_access_read_only_methods"; +import { deleteAuditTrail } from "./08_delete_audit_trail"; +import { taggedRecords } from "./advanced/09_tagged_records"; +import { capabilityConstraints } from "./advanced/10_capability_constraints"; +import { manageRecordTags } from "./advanced/11_manage_record_tags"; +import { customsClearance } from "./real-world/01_customs_clearance"; +import { clinicalTrial } from "./real-world/02_clinical_trial"; + +export async function main(example?: string) { + const argument = example ?? process.argv?.[2]?.toLowerCase(); + if (!argument) { + throw new Error("Please specify an example name, e.g. 
'01_create_audit_trail'"); + } + + switch (argument) { + case "01_create_audit_trail": + return createAuditTrail(); + case "02_add_and_read_records": + return addAndReadRecords(); + case "03_update_metadata": + return updateMetadata(); + case "04_configure_locking": + return configureLocking(); + case "05_manage_access": + return manageAccess(); + case "06_delete_records": + return deleteRecords(); + case "07_access_read_only_methods": + return accessReadOnlyMethods(); + case "08_delete_audit_trail": + return deleteAuditTrail(); + case "09_tagged_records": + return taggedRecords(); + case "10_capability_constraints": + return capabilityConstraints(); + case "11_manage_record_tags": + return manageRecordTags(); + case "01_customs_clearance": + return customsClearance(); + case "02_clinical_trial": + return clinicalTrial(); + default: + throw new Error(`Unknown example name: '${argument}'`); + } +} + +main().catch((error) => { + console.error("Example error:", error); +}); diff --git a/bindings/wasm/audit_trail_wasm/examples/src/real-world/01_customs_clearance.ts b/bindings/wasm/audit_trail_wasm/examples/src/real-world/01_customs_clearance.ts new file mode 100644 index 00000000..85e6ccbf --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/real-world/01_customs_clearance.ts @@ -0,0 +1,302 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * # Customs Clearance Example + * + * Models a customs-clearance process for a single shipment. + * + * ## Actors + * + * - **Admin**: Creates the trail and sets up all roles and capabilities. + * - **DocsOperator**: Handles document submission (invoices, packing lists). Writes only + * `documents`-tagged records. + * - **ExportBroker**: Files export declarations and records clearance decisions at the origin. + * Writes only `export`-tagged records. + * - **ImportBroker**: Handles duty assessment and import clearance at the destination. + * Writes only `import`-tagged records. 
+ * - **Inspector**: Records the outcome of a customs physical inspection. Writes only + * `inspection`-tagged records; the role is created mid-process when an inspection is triggered. + * - **Supervisor**: Updates the mutable trail metadata (processing status). No record-write + * permissions. + * - **LockingAdmin**: Freezes the trail once the shipment is fully cleared. + * + * ## How the trail is used + * + * - immutable_metadata: shipment and declaration identity + * - updatable_metadata: current customs-processing status + * - record tags: documents, export, import, inspection + * - roles and capabilities: each operational role writes only the events it owns + * - locking: writes are frozen once the shipment is fully cleared + */ + +import { + AuditTrailClient, + CapabilityIssueOptions, + Data, + LockingConfig, + LockingWindow, + PermissionSet, + RoleTags, + TimeLock, +} from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { getFundedClient, TEST_GAS_BUDGET } from "../util"; + +export async function customsClearance(): Promise { + console.log("=== Customs Clearance ===\n"); + + const admin = await getFundedClient(); + const docsOperator = await getFundedClient(); + const exportBroker = await getFundedClient(); + const importBroker = await getFundedClient(); + const supervisor = await getFundedClient(); + const lockingAdmin = await getFundedClient(); + const inspector = await getFundedClient(); + + // === Create the customs-clearance trail === + + console.log("Creating a customs-clearance trail..."); + + const { output: created } = await admin + .createTrail() + .withRecordTags(["documents", "export", "import", "inspection"]) + .withTrailMetadata( + "Shipment SHP-2026-CLEAR-001", + "Route: Hamburg, Germany -> Nairobi, Kenya | Declaration: DEC-2026-44017", + ) + .withUpdatableMetadata("Status: Documents Pending") + .withLockingConfig( + new LockingConfig(LockingWindow.withCountBased(BigInt(2)), TimeLock.withNone(), 
TimeLock.withNone()), + ) + .withInitialRecordString("Customs clearance case opened for inbound shipment", "event:case_opened", "documents") + .finish() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const trailId = created.id; + + // === Set up roles and capabilities for each actor === + + await issueTaggedRecordRole(admin, trailId, "DocsOperator", "documents", docsOperator.senderAddress()); + await issueTaggedRecordRole(admin, trailId, "ExportBroker", "export", exportBroker.senderAddress()); + await issueTaggedRecordRole(admin, trailId, "ImportBroker", "import", importBroker.senderAddress()); + + // Supervisor can update metadata. + await admin + .trail(trailId) + .access() + .forRole("Supervisor") + .create(PermissionSet.metadataAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await admin + .trail(trailId) + .access() + .forRole("Supervisor") + .issueCapability(new CapabilityIssueOptions(supervisor.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + // LockingAdmin can manage locking. + await admin + .trail(trailId) + .access() + .forRole("LockingAdmin") + .create(PermissionSet.lockingAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await admin + .trail(trailId) + .access() + .forRole("LockingAdmin") + .issueCapability(new CapabilityIssueOptions(lockingAdmin.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + // === Document submission === + + // Documents are stored off-chain in an access-controlled environment (e.g. a TWIN node). + // Only the SHA-256 fingerprint is committed on-chain for tamper-evidence. 
+ const invoiceBytes = new TextEncoder().encode("invoice-SHP-2026-CLEAR-001-v1.pdf"); + const invoiceHash = new Uint8Array(await crypto.subtle.digest("SHA-256", invoiceBytes)); + const docsUploaded = await docsOperator + .trail(trailId) + .records() + .add(Data.fromBytes(invoiceHash), "event:documents_uploaded", "documents") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(docsOperator); + console.log("Docs operator added record", docsUploaded.output.sequenceNumber + ".\n"); + + await supervisor + .trail(trailId) + .updateMetadata("Status: Awaiting Export Clearance") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(supervisor); + + // === Export clearance === + + const exportFiled = await exportBroker + .trail(trailId) + .records() + .add( + Data.fromString("Export declaration filed with German customs"), + "event:export_declaration_filed", + "export", + ) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(exportBroker); + + const exportCleared = await exportBroker + .trail(trailId) + .records() + .add(Data.fromString("Export clearance granted by Hamburg customs office"), "event:export_cleared", "export") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(exportBroker); + + console.log( + "Export broker added records", + exportFiled.output.sequenceNumber, + "and", + exportCleared.output.sequenceNumber + ".\n", + ); + + await supervisor + .trail(trailId) + .updateMetadata("Status: Awaiting Import Clearance") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(supervisor); + + // === Inspection gate === + + // The import broker does not hold an inspection-scoped capability at this point. + // The write attempt must fail to prove that tag-based access control is enforced. 
+ let inspectionDenied = false; + try { + await importBroker + .trail(trailId) + .records() + .add( + Data.fromString("Import broker attempted to record an inspection result"), + "event:invalid_inspection_write", + "inspection", + ) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(importBroker); + inspectionDenied = true; + } catch { + // Expected + } + assert.equal( + inspectionDenied, + false, + "inspection-tagged writes should fail before an inspection-scoped capability exists", + ); + console.log("Inspection write was correctly denied before the inspector role existed.\n"); + + // A customs inspection is triggered; the inspector role is created and issued mid-process. + await issueTaggedRecordRole(admin, trailId, "Inspector", "inspection", inspector.senderAddress()); + + const inspectionDone = await inspector + .trail(trailId) + .records() + .add( + Data.fromString("Customs inspection completed with no discrepancies"), + "event:inspection_completed", + "inspection", + ) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(inspector); + console.log("Inspector added record", inspectionDone.output.sequenceNumber + ".\n"); + + // === Import clearance === + + const dutyAssessed = await importBroker + .trail(trailId) + .records() + .add(Data.fromString("Import duty assessed and paid"), "event:duty_assessed", "import") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(importBroker); + + const importCleared = await importBroker + .trail(trailId) + .records() + .add(Data.fromString("Import clearance granted by Nairobi customs"), "event:import_cleared", "import") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(importBroker); + + console.log( + "Import broker added records", + dutyAssessed.output.sequenceNumber, + "and", + importCleared.output.sequenceNumber + ".\n", + ); + + await supervisor + .trail(trailId) + .updateMetadata("Status: Cleared") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(supervisor); + + // === Final lock and verification === + + 
await lockingAdmin + .trail(trailId) + .locking() + .updateWriteLock(TimeLock.withInfinite()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(lockingAdmin); + + const afterLock = await admin.trail(trailId).get(); + console.log("Write lock after clearance:", afterLock.lockingConfig.writeLock, "\n"); + + let lateWriteSucceeded = false; + try { + await docsOperator + .trail(trailId) + .records() + .add(Data.fromString("Late customs note after the case was closed"), "event:late_note", "documents") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(docsOperator); + lateWriteSucceeded = true; + } catch { + // Expected + } + assert.equal(lateWriteSucceeded, false, "cleared customs trail should reject late writes after the final lock"); + + const firstPage = await admin.trail(trailId).records().listPage(undefined, 20); + console.log("Recorded customs events:"); + for (const record of firstPage.records) { + console.log(` #${record.sequenceNumber} | ${record.data} | tag=${record.tag} | ${record.metadata}`); + } + + assert.equal(firstPage.records.length, 7, "expected 7 customs records including the initial case-opened record"); + + const trailState = await admin.trail(trailId).get(); + assert.equal(trailState.updatableMetadata, "Status: Cleared", "customs case should finish in cleared state"); + + console.log("\nCustoms clearance completed successfully."); +} + +async function issueTaggedRecordRole( + admin: AuditTrailClient, + trailId: string, + roleName: string, + tag: string, + issuedTo: string, +): Promise { + await admin + .trail(trailId) + .access() + .forRole(roleName) + .create(PermissionSet.recordAdminPermissions(), new RoleTags([tag])) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await admin + .trail(trailId) + .access() + .forRole(roleName) + .issueCapability(new CapabilityIssueOptions(issuedTo)) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); +} diff --git 
a/bindings/wasm/audit_trail_wasm/examples/src/real-world/02_clinical_trial.ts b/bindings/wasm/audit_trail_wasm/examples/src/real-world/02_clinical_trial.ts new file mode 100644 index 00000000..597602a7 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/real-world/02_clinical_trial.ts @@ -0,0 +1,315 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +/** + * # Clinical Trial Data-Integrity Example + * + * Models a Phase III clinical trial where an immutable audit trail + * guarantees data integrity, role-scoped access, and time-constrained oversight. + * + * ## Actors + * + * - **Admin**: Creates the trail and sets up all roles and capabilities. + * - **Enroller**: Writes enrollment events. Restricted to the `enrollment` tag. + * - **SafetyOfficer**: Records adverse events and safety observations. Restricted to `safety`. + * - **EfficacyReviewer**: Records treatment outcomes. Restricted to `efficacy`. + * - **PkAnalyst**: Records pharmacokinetic results. Restricted to the `pk` tag that is added + * mid-study when a PK sub-study is initiated. + * - **Monitor**: Updates the mutable study-phase metadata. Access is time-windowed to the + * active study period (90 days from now). + * - **DataSafetyBoard**: Controls write and delete locks. Freezes the dataset after review. + * - **Regulator**: Read-only verifier. In production this would use `AuditTrailClientReadOnly` + * (no signing key); here a funded client is used to keep the example self-contained. 
+ * + * ## How the trail is used + * + * - immutable_metadata: protocol identity and study description + * - updatable_metadata: current study phase (updated as the trial progresses) + * - record tags: enrollment, safety, efficacy, pk (added mid-study) + * - roles and capabilities: each role writes only its designated tag + * - time-constrained capabilities: Monitor access is windowed to the study period + * - locking: a deletion window protects recent records; a time-lock freezes the + * dataset after the Data Safety Board completes its review + * - read-only verification: a regulator inspects the trail without write access + */ + +import { + AuditTrailClient, + CapabilityIssueOptions, + Data, + LockingConfig, + LockingWindow, + PermissionSet, + RoleTags, + TimeLock, +} from "@iota/audit-trail/node"; +import { strict as assert } from "assert"; +import { getFundedClient, TEST_GAS_BUDGET } from "../util"; + +export async function clinicalTrial(): Promise { + console.log("=== Clinical Trial Data Integrity ===\n"); + + const admin = await getFundedClient(); + const enroller = await getFundedClient(); + const safetyOfficer = await getFundedClient(); + const efficacyReviewer = await getFundedClient(); + const pkAnalyst = await getFundedClient(); + const monitor = await getFundedClient(); + const dataSafetyBoard = await getFundedClient(); + const regulator = await getFundedClient(); + + // === Create the clinical-trial trail === + + console.log("Creating the clinical-trial audit trail..."); + + const { output: created } = await admin + .createTrail() + .withRecordTags(["enrollment", "safety", "efficacy"]) + .withTrailMetadata( + "Protocol CTR-2026-03742", + "Phase III: Efficacy of Drug X vs Placebo in Moderate-to-Severe Asthma", + ) + .withUpdatableMetadata("Phase: Enrollment") + .withLockingConfig( + new LockingConfig(LockingWindow.withCountBased(BigInt(3)), TimeLock.withNone(), TimeLock.withNone()), + ) + .withInitialRecordString( + "Clinical trial CTR-2026-03742 
opened for enrollment", + "event:trial_opened", + "enrollment", + ) + .finish() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const trailId = created.id; + console.log("Trail created with ID", trailId, "\n"); + + // === Define roles with tag-scoped permissions === + + console.log("Defining study roles..."); + + await issueTaggedRecordRole(admin, trailId, "Enroller", "enrollment", enroller.senderAddress()); + await issueTaggedRecordRole(admin, trailId, "SafetyOfficer", "safety", safetyOfficer.senderAddress()); + await issueTaggedRecordRole(admin, trailId, "EfficacyReviewer", "efficacy", efficacyReviewer.senderAddress()); + + // Monitor can update metadata (study phase) — valid for 90 days. + await admin + .trail(trailId) + .access() + .forRole("Monitor") + .create(PermissionSet.metadataAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + const nowMs = BigInt(Date.now()); + const studyEndMs = nowMs + BigInt(90 * 24 * 60 * 60 * 1000); + + await admin + .trail(trailId) + .access() + .forRole("Monitor") + .issueCapability(new CapabilityIssueOptions(monitor.senderAddress(), nowMs, studyEndMs)) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + console.log("Monitor capability issued (expires at timestamp", studyEndMs + ")\n"); + + // Data Safety Board can manage locking. 
+ await admin + .trail(trailId) + .access() + .forRole("DataSafetyBoard") + .create(PermissionSet.lockingAdminPermissions()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await admin + .trail(trailId) + .access() + .forRole("DataSafetyBoard") + .issueCapability(new CapabilityIssueOptions(dataSafetyBoard.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + + // === Enrollment phase === + + console.log("--- Enrollment Phase ---"); + + const enrolled = await enroller + .trail(trailId) + .records() + .add(Data.fromString("Patient P-101 enrolled at Site Hamburg"), "event:patient_enrolled", "enrollment") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(enroller); + console.log("Enroller added record", enrolled.output.sequenceNumber + ".\n"); + + // === Study data collection === + + console.log("--- Study Data Collection ---"); + + const safetyEvent = await safetyOfficer + .trail(trailId) + .records() + .add( + Data.fromString("Adverse event: mild headache reported by Patient P-101"), + "event:adverse_event", + "safety", + ) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(safetyOfficer); + + const efficacyRecord = await efficacyReviewer + .trail(trailId) + .records() + .add( + Data.fromString("Week 12: FEV1 improvement of 320 mL over baseline for P-101"), + "event:efficacy_observed", + "efficacy", + ) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(efficacyReviewer); + + console.log( + "SafetyOfficer added record", + safetyEvent.output.sequenceNumber, + ", EfficacyReviewer added record", + efficacyRecord.output.sequenceNumber + ".\n", + ); + + // === Mid-study amendment: add pharmacokinetics tag === + + console.log("--- Mid-Study Amendment ---"); + + await admin.trail(trailId).tags().add("pk").withGasBudget(TEST_GAS_BUDGET).buildAndExecute(admin); + console.log("Added tag \"pk\" (pharmacokinetics) to the trail."); + + await issueTaggedRecordRole(admin, trailId, "PkAnalyst", "pk", pkAnalyst.senderAddress()); + + 
const pkRecord = await pkAnalyst + .trail(trailId) + .records() + .add(Data.fromString("PK analysis: Cmax reached at 2.4 h, half-life 8.7 h"), "event:pk_result", "pk") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(pkAnalyst); + console.log("PkAnalyst added record", pkRecord.output.sequenceNumber + ".\n"); + + // === Deletion window enforcement === + + console.log("--- Deletion Window Enforcement ---"); + + // The PkAnalyst has RecordAdmin permissions, but the count-based deletion window + // protects the newest 3 records, so this attempt must fail. + let deleteSucceeded = false; + try { + await pkAnalyst + .trail(trailId) + .records() + .delete(pkRecord.output.sequenceNumber) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(pkAnalyst); + deleteSucceeded = true; + } catch { + // Expected + } + assert.equal(deleteSucceeded, false, "recent records must be protected by the count-based deletion window"); + console.log( + "Record", + pkRecord.output.sequenceNumber, + "is within the deletion window (newest 3) and cannot be deleted.\n", + ); + + // === Metadata update (Monitor) === + + console.log("--- Metadata Update ---"); + + await monitor + .trail(trailId) + .updateMetadata("Phase: Data Review") + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(monitor); + + const trail = await admin.trail(trailId).get(); + console.log("Study phase updated to:", trail.updatableMetadata, "\n"); + + // === Data Safety Board locks the study dataset === + + console.log("--- Data Safety Board Lock ---"); + + const lockUntilMs = nowMs + BigInt(365 * 24 * 60 * 60 * 1000); // 1 year from now + + await dataSafetyBoard + .trail(trailId) + .locking() + .updateWriteLock(TimeLock.withUnlockAtMs(lockUntilMs)) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(dataSafetyBoard); + + console.log("Write lock set to UnlockAtMs(" + lockUntilMs + ") — writes blocked until that timestamp.\n"); + + // Lock trail from deletion permanently. 
+ await dataSafetyBoard + .trail(trailId) + .locking() + .updateDeleteTrailLock(TimeLock.withInfinite()) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(dataSafetyBoard); + + const finalLocking = await admin.trail(trailId).get(); + console.log( + "Delete-trail lock set to", + finalLocking.lockingConfig.deleteTrailLock.type, + "— trail cannot be deleted.\n", + ); + + // === Regulator read-only verification === + + console.log("--- Regulator Verification ---"); + + // In production the regulator would use AuditTrailClientReadOnly (no signing key). + // Here a funded client is used to keep the example self-contained. + const regulatorHandle = regulator.trail(trailId); + const onChain = await regulatorHandle.get(); + + console.log("Protocol:", onChain.immutableMetadata); + console.log("Phase: ", onChain.updatableMetadata); + console.log("Roles: ", onChain.roles.roles.map((r) => r.name)); + console.log("Tags: ", onChain.tags.map((t) => t.tag)); + + const firstPage = await regulatorHandle.records().listPage(undefined, 20); + console.log("\nVerified records (" + firstPage.records.length + " total):"); + for (const record of firstPage.records) { + console.log(` #${record.sequenceNumber} | tag=${record.tag} | ${record.metadata}`); + } + + assert.equal(firstPage.records.length, 5, "expected 5 records (initial + enrolled + safety + efficacy + pk)"); + assert.ok(onChain.tags.some((t) => t.tag === "pk"), "the 'pk' tag must exist after mid-study amendment"); + assert.equal(onChain.lockingConfig.deleteRecordWindow.type, LockingWindow.withCountBased(BigInt(3)).type); + assert.equal(onChain.lockingConfig.deleteTrailLock.type, TimeLock.withInfinite().type); + assert.equal(onChain.lockingConfig.writeLock.type, TimeLock.withUnlockAtMs(lockUntilMs).type); + assert.equal(onChain.updatableMetadata, "Phase: Data Review"); + + console.log("\nClinical trial data-integrity verification completed successfully."); +} + +async function issueTaggedRecordRole( + admin: AuditTrailClient, + 
trailId: string, + roleName: string, + tag: string, + issuedTo: string, +): Promise { + await admin + .trail(trailId) + .access() + .forRole(roleName) + .create(PermissionSet.recordAdminPermissions(), new RoleTags([tag])) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); + await admin + .trail(trailId) + .access() + .forRole(roleName) + .issueCapability(new CapabilityIssueOptions(issuedTo)) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(admin); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/tests.ts b/bindings/wasm/audit_trail_wasm/examples/src/tests.ts new file mode 100644 index 00000000..7848a8ef --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/tests.ts @@ -0,0 +1,64 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +import { afterEach, describe, it } from "mocha"; + +import { createAuditTrail } from "./01_create_audit_trail"; +import { addAndReadRecords } from "./02_add_and_read_records"; +import { updateMetadata } from "./03_update_metadata"; +import { configureLocking } from "./04_configure_locking"; +import { manageAccess } from "./05_manage_access"; +import { deleteRecords } from "./06_delete_records"; +import { accessReadOnlyMethods } from "./07_access_read_only_methods"; +import { deleteAuditTrail } from "./08_delete_audit_trail"; +import { taggedRecords } from "./advanced/09_tagged_records"; +import { capabilityConstraints } from "./advanced/10_capability_constraints"; +import { manageRecordTags } from "./advanced/11_manage_record_tags"; +import { customsClearance } from "./real-world/01_customs_clearance"; +import { clinicalTrial } from "./real-world/02_clinical_trial"; + +describe("Audit trail wasm node examples", function() { + afterEach(() => { + console.log("\n----------------------------------------------------\n"); + }); + + it("creates a trail", async () => { + await createAuditTrail(); + }); + it("adds and reads records", async () => { + await addAndReadRecords(); + }); + 
it("updates metadata", async () => { + await updateMetadata(); + }); + it("configures locking", async () => { + await configureLocking(); + }); + it("manages access", async () => { + await manageAccess(); + }); + it("deletes records", async () => { + await deleteRecords(); + }); + it("accesses read-only methods", async () => { + await accessReadOnlyMethods(); + }); + it("deletes an audit trail", async () => { + await deleteAuditTrail(); + }); + it("uses tagged records", async () => { + await taggedRecords(); + }); + it("constrains capabilities", async () => { + await capabilityConstraints(); + }); + it("manages record tags", async () => { + await manageRecordTags(); + }); + it("runs customs clearance example", async () => { + await customsClearance(); + }); + it("runs clinical trial example", async () => { + await clinicalTrial(); + }); +}); diff --git a/bindings/wasm/audit_trail_wasm/examples/src/util.ts b/bindings/wasm/audit_trail_wasm/examples/src/util.ts new file mode 100644 index 00000000..96fc9415 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/util.ts @@ -0,0 +1,98 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +import { + AuditTrailClient, + AuditTrailClientReadOnly, + CapabilityIssueOptions, + LockingConfig, + LockingWindow, + PackageOverrides, + Permission, + PermissionSet, + TimeLock, +} from "@iota/audit-trail/node"; +import { Ed25519KeypairSigner } from "@iota/iota-interaction-ts/node/test_utils"; +import { IotaClient } from "@iota/iota-sdk/client"; +import { getFaucetHost, requestIotaFromFaucetV0 } from "@iota/iota-sdk/faucet"; +import { Ed25519Keypair } from "@iota/iota-sdk/keypairs/ed25519"; + +export const IOTA_AUDIT_TRAIL_PKG_ID = globalThis?.process?.env?.IOTA_AUDIT_TRAIL_PKG_ID || ""; +export const IOTA_TF_COMPONENTS_PKG_ID = globalThis?.process?.env?.IOTA_TF_COMPONENTS_PKG_ID || ""; +export const NETWORK_NAME_FAUCET = globalThis?.process?.env?.NETWORK_NAME_FAUCET || "localnet"; +export const 
NETWORK_URL = globalThis?.process?.env?.NETWORK_URL || "http://127.0.0.1:9000"; +export const TEST_GAS_BUDGET = BigInt(50_000_000); + +if (!IOTA_AUDIT_TRAIL_PKG_ID || !IOTA_TF_COMPONENTS_PKG_ID) { + throw new Error( + "IOTA_AUDIT_TRAIL_PKG_ID and IOTA_TF_COMPONENTS_PKG_ID env variables must be set to run the examples", + ); +} + +export async function requestFunds(address: string) { + await requestIotaFromFaucetV0({ + host: getFaucetHost(NETWORK_NAME_FAUCET), + recipient: address, + }); +} + +export async function getReadOnlyClient(): Promise<AuditTrailClientReadOnly> { + const iotaClient = new IotaClient({ url: NETWORK_URL }); + return AuditTrailClientReadOnly.createWithPackageOverrides( + iotaClient, + new PackageOverrides(IOTA_AUDIT_TRAIL_PKG_ID, IOTA_TF_COMPONENTS_PKG_ID), + ); +} + +export async function getFundedClient(): Promise<AuditTrailClient> { + const readOnlyClient = await getReadOnlyClient(); + const keypair = Ed25519Keypair.generate(); + const signer = new Ed25519KeypairSigner(keypair); + const client = await AuditTrailClient.create(readOnlyClient, signer); + + await requestFunds(client.senderAddress()); + + const balance = await client.iotaClient().getBalance({ owner: client.senderAddress() }); + if (balance.totalBalance === "0") { + throw new Error("Balance is still 0 after faucet funding"); + } + + console.log(`Received gas from faucet: ${balance.totalBalance} for owner ${client.senderAddress()}`); + return client; +} + +export function defaultLockingConfig(): LockingConfig { + return new LockingConfig( + LockingWindow.withCountBased(BigInt(100)), + TimeLock.withNone(), + TimeLock.withNone(), + ); +} + +export async function createTrailWithSeedRecord(client: AuditTrailClient) { + return client + .createTrail() + .withTrailMetadata("Example Audit Trail", "WASM example trail") + .withUpdatableMetadata("seed metadata") + .withLockingConfig(defaultLockingConfig()) + .withInitialRecordString("seed record", "v1") + .finish() + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(client); +} + 
+export async function grantSelfRecordPermissions(client: AuditTrailClient, trailId: string): Promise<void> { + const role = client.trail(trailId).access().forRole("example-record-writer"); + const permissions = new PermissionSet([ + Permission.AddRecord, + Permission.DeleteRecord, + Permission.DeleteAllRecords, + Permission.CorrectRecord, + ]); + + await role.create(permissions).withGasBudget(TEST_GAS_BUDGET).buildAndExecute(client); + await role + .issueCapability(new CapabilityIssueOptions(client.senderAddress())) + .withGasBudget(TEST_GAS_BUDGET) + .buildAndExecute(client); +} diff --git a/bindings/wasm/audit_trail_wasm/examples/src/web-main.ts b/bindings/wasm/audit_trail_wasm/examples/src/web-main.ts new file mode 100644 index 00000000..1555454d --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/src/web-main.ts @@ -0,0 +1,54 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +import { createAuditTrail } from "./01_create_audit_trail"; +import { addAndReadRecords } from "./02_add_and_read_records"; +import { updateMetadata } from "./03_update_metadata"; +import { configureLocking } from "./04_configure_locking"; +import { manageAccess } from "./05_manage_access"; +import { deleteRecords } from "./06_delete_records"; +import { accessReadOnlyMethods } from "./07_access_read_only_methods"; +import { deleteAuditTrail } from "./08_delete_audit_trail"; +import { taggedRecords } from "./advanced/09_tagged_records"; +import { capabilityConstraints } from "./advanced/10_capability_constraints"; +import { manageRecordTags } from "./advanced/11_manage_record_tags"; +import { customsClearance } from "./real-world/01_customs_clearance"; +import { clinicalTrial } from "./real-world/02_clinical_trial"; + +export async function main(example?: string) { + const argument = example ?? new URLSearchParams(window.location.search).get("example")?.toLowerCase(); + if (!argument) { + throw new Error("Please specify an example name, e.g. 
'01_create_audit_trail'"); + } + + switch (argument) { + case "01_create_audit_trail": + return createAuditTrail(); + case "02_add_and_read_records": + return addAndReadRecords(); + case "03_update_metadata": + return updateMetadata(); + case "04_configure_locking": + return configureLocking(); + case "05_manage_access": + return manageAccess(); + case "06_delete_records": + return deleteRecords(); + case "07_access_read_only_methods": + return accessReadOnlyMethods(); + case "08_delete_audit_trail": + return deleteAuditTrail(); + case "09_tagged_records": + return taggedRecords(); + case "10_capability_constraints": + return capabilityConstraints(); + case "11_manage_record_tags": + return manageRecordTags(); + case "01_customs_clearance": + return customsClearance(); + case "02_clinical_trial": + return clinicalTrial(); + default: + throw new Error(`Unknown example name: '${argument}'`); + } +} diff --git a/bindings/wasm/audit_trail_wasm/examples/tsconfig.node.json b/bindings/wasm/audit_trail_wasm/examples/tsconfig.node.json new file mode 100644 index 00000000..e82ad401 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/tsconfig.node.json @@ -0,0 +1,22 @@ +{ + "compilerOptions": { + "target": "es2020", + "baseUrl": "./", + "lib": [ + "ES6", + "dom" + ], + "esModuleInterop": true, + "module": "commonjs", + "moduleResolution": "node", + "skipLibCheck": true, + "paths": { + "@iota/audit-trail/node": [ + "../node" + ] + } + }, + "include": [ + "./src/**/*.ts" + ] +} diff --git a/bindings/wasm/audit_trail_wasm/examples/tsconfig.web.json b/bindings/wasm/audit_trail_wasm/examples/tsconfig.web.json new file mode 100644 index 00000000..4bfdc7c0 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/examples/tsconfig.web.json @@ -0,0 +1,22 @@ +{ + "compilerOptions": { + "target": "es2020", + "outDir": "./dist/web", + "baseUrl": "./", + "lib": [ + "ES6", + "dom" + ], + "esModuleInterop": true, + "moduleResolution": "node", + "skipLibCheck": true, + "paths": { + 
"@iota/audit-trail/node": [ + "../web" + ] + } + }, + "exclude": [ + "tests" + ] +} diff --git a/bindings/wasm/audit_trail_wasm/lib/index.ts b/bindings/wasm/audit_trail_wasm/lib/index.ts new file mode 100644 index 00000000..dbbf1ead --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/lib/index.ts @@ -0,0 +1,5 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +export * from "@iota/iota-interaction-ts/transaction_internal"; +export * from "~audit_trail_wasm"; diff --git a/bindings/wasm/audit_trail_wasm/lib/tsconfig.json b/bindings/wasm/audit_trail_wasm/lib/tsconfig.json new file mode 100644 index 00000000..7522c000 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/lib/tsconfig.json @@ -0,0 +1,21 @@ +{ + "extends": "../tsconfig.node.json", + "compilerOptions": { + "baseUrl": "./", + "paths": { + "~audit_trail_wasm": [ + "../node/audit_trail_wasm", + "./audit_trail_wasm.js" + ], + "@iota/iota-interaction-ts/*": [ + "../node_modules/@iota/iota-interaction-ts/node/*", + "@iota/iota-interaction-ts/node/" + ], + "../lib": [ + "." + ] + }, + "outDir": "../node", + "declarationDir": "../node" + } +} \ No newline at end of file diff --git a/bindings/wasm/audit_trail_wasm/lib/tsconfig.web.json b/bindings/wasm/audit_trail_wasm/lib/tsconfig.web.json new file mode 100644 index 00000000..9459a549 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/lib/tsconfig.web.json @@ -0,0 +1,22 @@ +{ + "extends": "../tsconfig.json", + "compilerOptions": { + "baseUrl": "./", + "paths": { + "~audit_trail_wasm": [ + "../web/audit_trail_wasm", + "./audit_trail_wasm.js" + ], + "@iota/iota-interaction-ts/*": [ + "../node_modules/@iota/iota-interaction-ts/web/*", + "@iota/iota-interaction-ts/web/" + ], + "../lib": [ + "." 
+ ] + }, + "outDir": "../web", + "declarationDir": "../web", + "module": "ES2022" + } +} \ No newline at end of file diff --git a/bindings/wasm/audit_trail_wasm/package-lock.json b/bindings/wasm/audit_trail_wasm/package-lock.json new file mode 100644 index 00000000..6311fa79 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/package-lock.json @@ -0,0 +1,4507 @@ +{ + "name": "@iota/audit-trail", + "version": "0.1.0-alpha", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@iota/audit-trail", + "version": "0.1.0-alpha", + "license": "Apache-2.0", + "dependencies": { + "@iota/iota-interaction-ts": "^0.12.0" + }, + "devDependencies": { + "@types/mocha": "^9.1.0", + "@types/node": "^22.0.0", + "cypress": "^14.2.0", + "dprint": "^0.33.0", + "mocha": "^9.2.0", + "rimraf": "^6.0.1", + "start-server-and-test": "^2.0.11", + "ts-mocha": "^9.0.2", + "ts-node": "^10.9.2", + "tsconfig-paths": "^4.1.0", + "typedoc": "^0.28.5", + "typedoc-plugin-markdown": "^4.4.1", + "typescript": "^5.7.3", + "wasm-opt": "^1.4.0" + }, + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@iota/iota-sdk": "^1.11.0" + } + }, + "node_modules/@0no-co/graphql.web": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@0no-co/graphql.web/-/graphql.web-1.2.0.tgz", + "integrity": "sha512-/1iHy9TTr63gE1YcR5idjx8UREz1s0kFhydf3bBLCXyqjhkIc6igAzTOx3zPifCwFR87tsh/4Pa9cNts6d2otw==", + "license": "MIT", + "peer": true, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0" + }, + "peerDependenciesMeta": { + "graphql": { + "optional": true + } + } + }, + "node_modules/@0no-co/graphqlsp": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@0no-co/graphqlsp/-/graphqlsp-1.15.2.tgz", + "integrity": "sha512-Ys031WnS3sTQQBtRTkQsYnw372OlW72ais4sp0oh2UMPRNyxxnq85zRfU4PIdoy9kWriysPT5BYAkgIxhbonFA==", + "license": "MIT", + "peer": true, + "dependencies": { + "@gql.tada/internal": "^1.0.0", + "graphql": "^15.5.0 || ^16.0.0 || ^17.0.0" + }, + 
"peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0 || ^17.0.0", + "typescript": "^5.0.0" + } + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cypress/request": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@cypress/request/-/request-3.0.10.tgz", + "integrity": "sha512-hauBrOdvu08vOsagkZ/Aju5XuiZx6ldsLfByg1htFeldhex+PeMrYauANzFsMJeAA0+dyPLbDoX2OYuvVoLDkQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~4.0.4", + "http-signature": "~1.4.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "performance-now": "^2.1.0", + "qs": "~6.14.1", + "safe-buffer": "^5.1.2", + "tough-cookie": "^5.0.0", + "tunnel-agent": "^0.6.0", + "uuid": "^8.3.2" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/@cypress/xvfb": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@cypress/xvfb/-/xvfb-1.2.4.tgz", + "integrity": "sha512-skbBzPggOVYCbnGgV+0dmBdW/s77ZkAOXIC1knS8NagwDjBrNC1LuXtQJeiN6l+m7lzmHtaoUw/ctJKdqkG57Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^3.1.0", + "lodash.once": "^4.1.1" + } + }, + "node_modules/@cypress/xvfb/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, 
+ "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/@gerrit0/mini-shiki": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@gerrit0/mini-shiki/-/mini-shiki-3.23.0.tgz", + "integrity": "sha512-bEMORlG0cqdjVyCEuU0cDQbORWX+kYCeo0kV1lbxF5bt4r7SID2l9bqsxJEM0zndaxpOUT7riCyIVEuqq/Ynxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@shikijs/engine-oniguruma": "^3.23.0", + "@shikijs/langs": "^3.23.0", + "@shikijs/themes": "^3.23.0", + "@shikijs/types": "^3.23.0", + "@shikijs/vscode-textmate": "^10.0.2" + } + }, + "node_modules/@gql.tada/cli-utils": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/@gql.tada/cli-utils/-/cli-utils-1.7.2.tgz", + "integrity": "sha512-Qbc7hbLvCz6IliIJpJuKJa9p05b2Jona7ov7+qofCsMRxHRZE1kpAmZMvL8JCI4c0IagpIlWNaMizXEQUe8XjQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "@0no-co/graphqlsp": "^1.12.13", + "@gql.tada/internal": "1.0.8", + "graphql": "^15.5.0 || ^16.0.0 || ^17.0.0" + }, + "peerDependencies": { + "@0no-co/graphqlsp": "^1.12.13", + "@gql.tada/svelte-support": "1.0.1", + "@gql.tada/vue-support": "1.0.1", + "graphql": "^15.5.0 || ^16.0.0 || ^17.0.0", + "typescript": "^5.0.0" + }, + "peerDependenciesMeta": { + "@gql.tada/svelte-support": { + "optional": true + }, + "@gql.tada/vue-support": { + "optional": true + } + } + }, + "node_modules/@gql.tada/internal": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@gql.tada/internal/-/internal-1.0.8.tgz", + "integrity": "sha512-XYdxJhtHC5WtZfdDqtKjcQ4d7R1s0d1rnlSs3OcBEUbYiPoJJfZU7tWsVXuv047Z6msvmr4ompJ7eLSK5Km57g==", + "license": "MIT", + "peer": true, + "dependencies": { + "@0no-co/graphql.web": "^1.0.5" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0 || ^17.0.0", + "typescript": "^5.0.0" + } + }, + "node_modules/@graphql-typed-document-node/core": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@graphql-typed-document-node/core/-/core-3.2.0.tgz", + 
"integrity": "sha512-mB9oAsNCm9aM3/SOv4YtBMqZbYj10R7dkq8byBqxGY/ncFwhf2oQzMV+LCRlWoDSEBJ3COiR1yeDvMtsoOsuFQ==", + "license": "MIT", + "peer": true, + "peerDependencies": { + "graphql": "^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@hapi/address": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/@hapi/address/-/address-5.1.1.tgz", + "integrity": "sha512-A+po2d/dVoY7cYajycYI43ZbYMXukuopIsqCjh5QzsBCipDtdofHntljDlpccMjIfTy6UOkg+5KPriwYch2bXA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^11.0.2" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@hapi/formula": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@hapi/formula/-/formula-3.0.2.tgz", + "integrity": "sha512-hY5YPNXzw1He7s0iqkRQi+uMGh383CGdyyIGYtB+W5N3KHPXoqychklvHhKCC9M3Xtv0OCs/IHw+r4dcHtBYWw==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@hapi/hoek": { + "version": "11.0.7", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-11.0.7.tgz", + "integrity": "sha512-HV5undWkKzcB4RZUusqOpcgxOaq6VOAH7zhhIr2g3G8NF/MlFO75SjOr2NfuSx0Mh40+1FqCkagKLJRykUWoFQ==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@hapi/pinpoint": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@hapi/pinpoint/-/pinpoint-2.0.1.tgz", + "integrity": "sha512-EKQmr16tM8s16vTT3cA5L0kZZcTMU5DUOZTuvpnY738m+jyP3JIUj+Mm1xc1rsLkGBQ/gVnfKYPwOmPg1tUR4Q==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@hapi/tlds": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@hapi/tlds/-/tlds-1.1.6.tgz", + "integrity": "sha512-xdi7A/4NZokvV0ewovme3aUO5kQhW9pQ2YD1hRqZGhhSi5rBv4usHYidVocXSi9eihYsznZxLtAiEYYUL6VBGw==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@hapi/topo": { + "version": "6.0.2", + "resolved": 
"https://registry.npmjs.org/@hapi/topo/-/topo-6.0.2.tgz", + "integrity": "sha512-KR3rD5inZbGMrHmgPxsJ9dbi6zEK+C3ZwUwTa+eMwWLz7oijWUTWD2pMSNNYJAU6Qq+65NkxXjqHr/7LM2Xkqg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^11.0.2" + } + }, + "node_modules/@iota/bcs": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@iota/bcs/-/bcs-1.5.0.tgz", + "integrity": "sha512-/hv395YtUcRNLY00v7Cl2O+KvVUaUajg4OucZENgSE4Xu1ygUGsLD3dU5FixOUVOn7Abo+n7+KYr9PE/1dsvWg==", + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@scure/base": "^1.2.4" + } + }, + "node_modules/@iota/iota-interaction-ts": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@iota/iota-interaction-ts/-/iota-interaction-ts-0.12.0.tgz", + "integrity": "sha512-qGGn7DMpzDHCzdrvV4QWUXE1u/5UzX5Y7pWX9RNGkhlerD7gPk01abf4XjfmEhRkN3S2L7YBpnXK34LA6ZzC9w==", + "license": "Apache-2.0", + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@iota/iota-sdk": "^1.11.0" + } + }, + "node_modules/@iota/iota-sdk": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/@iota/iota-sdk/-/iota-sdk-1.11.0.tgz", + "integrity": "sha512-Fveg/4euheaBUzU1ybPyFGe7sSfLFUjLNHhPjNFUmSBOMR+l9q3LU1QdN2sLElcmgJZ+BLxAEmL8TZ0eX3Khpw==", + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@graphql-typed-document-node/core": "^3.2.0", + "@iota/bcs": "1.5.0", + "@noble/curves": "^1.4.2", + "@noble/hashes": "^1.4.0", + "@scure/base": "^1.2.4", + "@scure/bip32": "^1.4.0", + "@scure/bip39": "^1.3.0", + "bignumber.js": "^9.1.1", + "gql.tada": "^1.8.2", + "graphql": "^16.9.0", + "valibot": "^1.2.0" + }, + "engines": { + "node": ">=24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", 
+ "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@noble/curves": { + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@noble/curves/-/curves-1.9.7.tgz", + "integrity": "sha512-gbKGcRUYIjA3/zCCNaWDciTMFI0dCkvou3TL8Zmy5Nc7sJ47a0jtOeZoTaMxkuqRo9cRhjOdZJXegxYE5FN/xw==", + "license": "MIT", + "peer": true, + "dependencies": { + "@noble/hashes": "1.8.0" + }, + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@noble/hashes": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz", + "integrity": "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==", + "license": "MIT", + "peer": true, + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@scure/base": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.2.6.tgz", + "integrity": "sha512-g/nm5FgUa//MCj1gV09zTJTaM6KBAHqLN907YVQqf7zC49+DcO4B1so4ZX07Ef10Twr6nuqYEH9GEggFXA4Fmg==", + "license": "MIT", + "peer": true, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + 
"node_modules/@scure/bip32": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@scure/bip32/-/bip32-1.7.0.tgz", + "integrity": "sha512-E4FFX/N3f4B80AKWp5dP6ow+flD1LQZo/w8UnLGYZO674jS6YnYeepycOOksv+vLPSpgN35wgKgy+ybfTb2SMw==", + "license": "MIT", + "peer": true, + "dependencies": { + "@noble/curves": "~1.9.0", + "@noble/hashes": "~1.8.0", + "@scure/base": "~1.2.5" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@scure/bip39": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@scure/bip39/-/bip39-1.6.0.tgz", + "integrity": "sha512-+lF0BbLiJNwVlev4eKelw1WWLaiKXw7sSl8T6FvBlWkdX+94aGJ4o8XjUdlyhTCjd8c+B3KT3JfS8P0bLRNU6A==", + "license": "MIT", + "peer": true, + "dependencies": { + "@noble/hashes": "~1.8.0", + "@scure/base": "~1.2.5" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@shikijs/engine-oniguruma": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-3.23.0.tgz", + "integrity": "sha512-1nWINwKXxKKLqPibT5f4pAFLej9oZzQTsby8942OTlsJzOBZ0MWKiwzMsd+jhzu8YPCHAswGnnN1YtQfirL35g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.23.0", + "@shikijs/vscode-textmate": "^10.0.2" + } + }, + "node_modules/@shikijs/langs": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-3.23.0.tgz", + "integrity": "sha512-2Ep4W3Re5aB1/62RSYQInK9mM3HsLeB91cHqznAJMuylqjzNVAVCMnNWRHFtcNHXsoNRayP9z1qj4Sq3nMqYXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.23.0" + } + }, + "node_modules/@shikijs/themes": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-3.23.0.tgz", + "integrity": "sha512-5qySYa1ZgAT18HR/ypENL9cUSGOeI2x+4IvYJu4JgVJdizn6kG4ia5Q1jDEOi7gTbN4RbuYtmHh0W3eccOrjMA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.23.0" + } + }, + 
"node_modules/@shikijs/types": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-3.23.0.tgz", + "integrity": "sha512-3JZ5HXOZfYjsYSk0yPwBrkupyYSLpAE26Qc0HLghhZNGTZg/SKxXIIgoxOpmmeQP0RRSDJTk1/vPfw9tbw+jSQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, + "node_modules/@shikijs/vscode-textmate": { + "version": "10.0.2", + "resolved": "https://registry.npmjs.org/@shikijs/vscode-textmate/-/vscode-textmate-10.0.2.tgz", + "integrity": "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.12.tgz", + "integrity": "sha512-UCYBaeFvM11aU2y3YPZ//O5Rhj+xKyzy7mvcIoAjASbigy8mHMryP5cK7dgjlz2hWxh1g5pLw084E0a/wlUSFQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": 
"https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/@types/mocha": { + "version": "9.1.1", + "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-9.1.1.tgz", + "integrity": "sha512-Z61JK7DKDtdKTWwLeElSEBcWGRLY8g95ic5FoQqI9CMx0ns/Ghep3B4DfcEimiKMvtamNVULVNKEsiwV3aQmXw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.19.15", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.15.tgz", + "integrity": "sha512-F0R/h2+dsy5wJAUe3tAU6oqa2qbWY5TpNfL/RGmo1y38hiyO1w3x2jPtt76wmuaJI4DQnOBu21cNXQ2STIUUWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/sinonjs__fake-timers": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-8.1.1.tgz", + "integrity": "sha512-0kSuKjAS0TrGLJ0M/+8MaFkGsQhZpB6pxOmvS3K8FYI72K//YmdfoW9X2qPsAKh1mkwxGD5zib9s1FIFed6E8g==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/sizzle": { + "version": "2.3.10", + "resolved": "https://registry.npmjs.org/@types/sizzle/-/sizzle-2.3.10.tgz", + "integrity": 
"sha512-TC0dmN0K8YcWEAEfiPi5gJP14eJe30TTGjkvek3iM/1NdHHsdCA/Td6GvNndMOo/iSnIsZ4HuuhrYPDAmbxzww==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/yauzl": { + "version": "2.10.3", + "resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.3.tgz", + "integrity": "sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@ungap/promise-all-settled": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", + "integrity": "sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==", + "dev": true, + "license": "ISC" + }, + "node_modules/acorn": { + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", + "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.5", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.5.tgz", + "integrity": "sha512-HEHNfbars9v4pgpW6SO1KSPkfoS0xVOM/9UzkJltjlsHZmJasxg8aXkuZa7SMf8vKGIBhpUsPluQSqhJFCqebw==", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": 
"sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arch": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/arch/-/arch-2.2.0.tgz", + "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/arrify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", + "integrity": "sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/asn1": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": "~2.1.0" + } + }, + "node_modules/assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/astral-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", + "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/async": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + "dev": true, + "license": "MIT" + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/aws-sign2": { + "version": "0.7.0", + "resolved": 
"https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/aws4": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.13.2.tgz", + "integrity": "sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw==", + "dev": true, + "license": "MIT" + }, + "node_modules/axios": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.15.0.tgz", + "integrity": "sha512-wWyJDlAatxk30ZJer+GeCWS209sA42X+N5jU2jy6oHTp7ufw8uzUTVFBX9+wTfAlhiJXGS0Bq7X6efruWjuK9Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.11", + "form-data": "^4.0.5", + "proxy-from-env": "^2.1.0" + } + }, + "node_modules/axios/node_modules/proxy-from-env": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-2.1.0.tgz", + "integrity": "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": 
"consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tweetnacl": "^0.14.3" + } + }, + "node_modules/bignumber.js": { + "version": "9.3.1", + "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", + "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", + "license": "MIT", + "peer": true, + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/blob-util": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/blob-util/-/blob-util-2.0.2.tgz", + "integrity": "sha512-T7JQa+zsXXEa6/8ZhHcQEW1UFfVM49Ts65uBkFL6fz2QmrElqmbajIDJvuA0tEhRe5eIjpV9ZF+0RfZR9voJFQ==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/bluebird": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", + "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", 
+ "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true, + "license": "ISC" + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/cachedir": { + 
"version": "2.4.0", + "resolved": "https://registry.npmjs.org/cachedir/-/cachedir-2.4.0.tgz", + "integrity": "sha512-9EtFOZR8g22CL7BWjJ9BUx1+A/djkofnyW3aOXZORNW2kxoUpx2h+uN2cOqwPmFhnpVmxg+KW2OjOSgChTEvsQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + 
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/check-more-types": { + "version": "2.24.0", + "resolved": "https://registry.npmjs.org/check-more-types/-/check-more-types-2.24.0.tgz", + "integrity": "sha512-Pj779qHxV2tuapviy1bSZNEL1maXr13bPYpsvSDB68HlYcYuhlDrmGd63i0JHMCLKzc7rUSNIrpdJlhVlNwrxA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/chokidar": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + 
"dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/ci-info": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.4.0.tgz", + "integrity": "sha512-77PSwercCZU2Fc4sX94eF8k8Pxte6JAwL4/ICZLFjJLqegs7kCuAsqqj/70NQF6TvDpgFjkubQB2FW2ZZddvQg==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-table3": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.1.tgz", + "integrity": "sha512-w0q/enDHhPLq44ovMGdQeeDLvwxwavsJX7oQGYt/LrBlYsyaxyDnp6z3QzFut/6kLLKnlcUVJLrpB7KBfgG/RA==", + "dev": true, + "license": "MIT", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "colors": "1.4.0" + } + }, + "node_modules/cli-truncate": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-2.1.0.tgz", + "integrity": "sha512-n8fOixwDD6b/ObinzTrp1ZKFzbgvKZvuz/TvejnLn1aQfC6r52XEx85FmuC+3HI+JM7coBRXUvNqEU2PHVrHpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "slice-ansi": "^3.0.0", + "string-width": "^4.2.0" + }, + "engines": { + 
"node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/colors": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", + "integrity": "sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": 
"sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz", + "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/common-tags": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/common-tags/-/common-tags-1.8.2.tgz", + "integrity": "sha512-gk/Z852D2Wtb//0I+kRFNKKE9dIIVirjoqPoA1wJU+XePVXZfGeBpk45+A1rKO4Q43prqWBNY/MiIeRLbPWUaA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + 
"dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cypress": { + "version": "14.5.4", + "resolved": "https://registry.npmjs.org/cypress/-/cypress-14.5.4.tgz", + "integrity": "sha512-0Dhm4qc9VatOcI1GiFGVt8osgpPdqJLHzRwcAB5MSD/CAAts3oybvPUPawHyvJZUd8osADqZe/xzMsZ8sDTjXw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "@cypress/request": "^3.0.9", + "@cypress/xvfb": "^1.2.4", + "@types/sinonjs__fake-timers": "8.1.1", + "@types/sizzle": "^2.3.2", + "arch": "^2.2.0", + "blob-util": "^2.0.2", + "bluebird": "^3.7.2", + "buffer": "^5.7.1", + "cachedir": "^2.3.0", + "chalk": "^4.1.0", + "check-more-types": "^2.24.0", + "ci-info": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-table3": "0.6.1", + "commander": "^6.2.1", + "common-tags": "^1.8.0", + "dayjs": "^1.10.4", + "debug": "^4.3.4", + "enquirer": "^2.3.6", + "eventemitter2": "6.4.7", + "execa": "4.1.0", + "executable": "^4.1.1", + "extract-zip": "2.0.1", + "figures": "^3.2.0", + "fs-extra": "^9.1.0", + "getos": "^3.2.1", + "hasha": "5.2.2", + "is-installed-globally": "~0.4.0", + "lazy-ass": "^1.6.0", + "listr2": "^3.8.3", + "lodash": "^4.17.21", + "log-symbols": "^4.0.0", + "minimist": "^1.2.8", + "ospath": "^1.2.2", + "pretty-bytes": "^5.6.0", + "process": "^0.11.10", + "proxy-from-env": "1.0.0", + "request-progress": "^3.0.0", + "semver": "^7.7.1", + "supports-color": "^8.1.1", + "tmp": "~0.2.3", + "tree-kill": "1.2.2", + "untildify": "^4.0.0", + "yauzl": "^2.10.0" + }, + "bin": { + "cypress": "bin/cypress" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + } + }, + "node_modules/cypress/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", + "dev": true, + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/dayjs": { + "version": "1.11.20", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.20.tgz", + "integrity": "sha512-YbwwqR/uYpeoP4pu043q+LTDLFBLApUP6VxRihdfNTqu4ubqMlGDLd6ErXhEgsyvY0K6nCs7nggYumAN+9uEuQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", + "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/debug/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/diff": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", + "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dprint": { + "version": "0.33.0", + "resolved": "https://registry.npmjs.org/dprint/-/dprint-0.33.0.tgz", + "integrity": "sha512-VploASP7wL1HAYe5xWZKRwp8gW5zTdcG3Tb60DASv6QLnGKsl+OS+bY7wsXFrS4UcIbUNujXdsNG5FxBfRJIQg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "yauzl": "=2.10.0" + }, + "bin": { + "dprint": "bin.js" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==", + "dev": true, + "license": "MIT" + }, + "node_modules/ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "jsbn": "~0.1.0", + 
"safer-buffer": "^2.1.0" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/end-of-stream": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", + "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/enquirer": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.4.1.tgz", + "integrity": "sha512-rRqJg/6gd538VHvR3PSrdRBb/1Vy2YfzHqzvbhGIQpDRKIa4FgV/54b5Q1xYSxOOwKvjXweS26E0Q+nAMwp2pQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-colors": "^4.1.1", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": 
"sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/event-stream": { + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/event-stream/-/event-stream-3.3.4.tgz", + "integrity": 
"sha512-QHpkERcGsR0T7Qm3HNJSyXKEEj8AHNxkY3PK8TS2KJvQ7NiSHe3DDpwVKKtoYprL/AreyzFBeIkBIWChAqn60g==", + "dev": true, + "license": "MIT", + "dependencies": { + "duplexer": "~0.1.1", + "from": "~0", + "map-stream": "~0.1.0", + "pause-stream": "0.0.11", + "split": "0.3", + "stream-combiner": "~0.0.4", + "through": "~2.3.1" + } + }, + "node_modules/eventemitter2": { + "version": "6.4.7", + "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-6.4.7.tgz", + "integrity": "sha512-tYUSVOGeQPKt/eC1ABfhHy5Xd96N3oIijJvN3O9+TsC28T5V9yX9oEfEK5faP0EFSNVOG97qtAS68GBrQB2hDg==", + "dev": true, + "license": "MIT" + }, + "node_modules/execa": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz", + "integrity": "sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.0", + "get-stream": "^5.0.0", + "human-signals": "^1.1.1", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.0", + "onetime": "^5.1.0", + "signal-exit": "^3.0.2", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/executable": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/executable/-/executable-4.1.1.tgz", + "integrity": "sha512-8iA79xD3uAch729dUG8xaaBBFGaEa0wdD2VkYLFHwlqosEj/jT66AzcreRDSgV7ehnNLBW2WR5jIXwGKjVdTLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^2.2.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "dev": true, + "license": "MIT" + }, + "node_modules/extract-zip": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz", + "integrity": "sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "debug": "^4.1.1", + "get-stream": "^5.1.0", + "yauzl": "^2.10.0" + }, + "bin": { + "extract-zip": "cli.js" + }, + "engines": { + "node": ">= 10.17.0" + }, + "optionalDependencies": { + "@types/yauzl": "^2.9.1" + } + }, + "node_modules/extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==", + "dev": true, + "engines": [ + "node >=0.6.0" + ], + "license": "MIT" + }, + "node_modules/fd-slicer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", + "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "pend": "~1.2.0" + } + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/figures/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": 
"https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "license": "BSD-3-Clause", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": 
"https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "dev": true, + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/from": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/from/-/from-0.1.7.tgz", + "integrity": "sha512-twe20eF1OxVxp/ML/kq2p1uc6KvFK/+vs8WjEbeKmV2He22MKm7YF2ANIt+EOqhJ5L3K/SuuPhk0hWQDjOM23g==", + "dev": true, + "license": "MIT" + }, + "node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/fs-minipass/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + 
"url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/getos": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/getos/-/getos-3.2.1.tgz", + "integrity": "sha512-U56CfOK17OKgTVqozZjUKNdkfEv6jk5WISBJ8SHoagjE6L69zOwl3Z+O8myjY9MEW3i2HPWQBt/LTbCgcC973Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "async": "^3.2.0" + } + }, + "node_modules/getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", + "dev": true, + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0" + } + }, + "node_modules/glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz", + "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/global-dirs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ini": "2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gql.tada": { + 
"version": "1.9.0", + "resolved": "https://registry.npmjs.org/gql.tada/-/gql.tada-1.9.0.tgz", + "integrity": "sha512-1LMiA46dRs5oF7Qev6vMU32gmiNvM3+3nHoQZA9K9j2xQzH8xOAWnnJrLSbZOFHTSdFxqn86TL6beo1/7ja/aA==", + "license": "MIT", + "peer": true, + "dependencies": { + "@0no-co/graphql.web": "^1.0.5", + "@0no-co/graphqlsp": "^1.12.13", + "@gql.tada/cli-utils": "1.7.2", + "@gql.tada/internal": "1.0.8" + }, + "bin": { + "gql-tada": "bin/cli.js", + "gql.tada": "bin/cli.js" + }, + "peerDependencies": { + "typescript": "^5.0.0" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/graphql": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.13.1.tgz", + "integrity": "sha512-gGgrVCoDKlIZ8fIqXBBb0pPKqDgki0Z/FSKNiQzSGj2uEYHr1tq5wmBegGwJx6QB5S5cM0khSBpi/JFHMCvsmQ==", + "license": "MIT", + "peer": true, + "engines": { + "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" + } + }, + "node_modules/growl": { + "version": "1.10.5", + "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", + "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4.x" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": 
"sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasha": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/hasha/-/hasha-5.2.2.tgz", + "integrity": "sha512-Hrp5vIK/xr5SkeN2onO32H0MgNZ0f17HRNH39WfL0SYUNOTZ5Lz1TJ8Pajo/87dYGEFlLMm7mIc/k/s6Bvz9HQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-stream": "^2.0.0", + "type-fest": "^0.8.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/http-signature": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.4.0.tgz", + "integrity": 
"sha512-G5akfn7eKbpDN+8nPS/cb57YeA1jLTVxjpCj7tmm3QKPdyDy7T+qSC40e9ptydSWvkwjSXw1VbkpyEm39ukeAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "jsprim": "^2.0.2", + "sshpk": "^1.18.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/human-signals": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz", + "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8.12.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ini": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": 
"https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": 
"sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==", + "dev": true, + "license": "MIT" + }, + "node_modules/joi": { + "version": "18.1.2", + "resolved": "https://registry.npmjs.org/joi/-/joi-18.1.2.tgz", + "integrity": "sha512-rF5MAmps5esSlhCA+N1b6IYHDw9j/btzGaqfgie522jS02Ju/HXBxamlXVlKEHAxoMKQL77HWI8jlqWsFuekZA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/address": "^5.1.1", + "@hapi/formula": "^3.0.2", + "@hapi/hoek": "^11.0.7", + "@hapi/pinpoint": "^2.0.1", + "@hapi/tlds": "^1.1.1", + "@hapi/topo": "^6.0.2", + "@standard-schema/spec": 
"^1.1.0" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==", + "dev": true, + "license": "(AFL-2.1 OR BSD-3-Clause)" + }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", + "dev": true, + "license": "ISC" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + 
"optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/jsprim": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-2.0.2.tgz", + "integrity": "sha512-gqXddjPqQ6G40VdnI6T6yObEC+pDNvyP95wdQhkWkg7crHH3km5qP1FsOXEkzEQwnz6gz5qGTn1c2Y52wP3OyQ==", + "dev": true, + "engines": [ + "node >=0.6.0" + ], + "license": "MIT", + "dependencies": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.4.0", + "verror": "1.10.0" + } + }, + "node_modules/lazy-ass": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/lazy-ass/-/lazy-ass-1.6.0.tgz", + "integrity": "sha512-cc8oEVoctTvsFZ/Oje/kGnHbpWHYBe8IAJe4C0QNc3t8uM/0Y8+erSz/7Y1ALuXTEZTMvxXwO6YbX1ey3ujiZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "> 0.8" + } + }, + "node_modules/linkify-it": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-5.0.0.tgz", + "integrity": "sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "uc.micro": "^2.0.0" + } + }, + "node_modules/listr2": { + "version": "3.14.0", + "resolved": "https://registry.npmjs.org/listr2/-/listr2-3.14.0.tgz", + "integrity": "sha512-TyWI8G99GX9GjE54cJ+RrNMcIFBfwMPxc3XTFiAYGN4s10hWROGtOg7+O6u6LE3mNkyld7RSLE6nrKBvTfcs3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "cli-truncate": "^2.1.0", + "colorette": "^2.0.16", + "log-update": "^4.0.0", + "p-map": "^4.0.0", + "rfdc": "^1.3.0", + "rxjs": "^7.5.1", + "through": "^2.3.8", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "enquirer": ">= 2.3.0 < 3" + }, + "peerDependenciesMeta": { + "enquirer": { + "optional": true + } + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": 
"sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz", + "integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.once": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", + "integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/log-update/-/log-update-4.0.0.tgz", + "integrity": "sha512-9fkkDevMefjg0mmzWFBW8YkFP91OrizzkW3diF7CpG+S2EYdy4+TVfGwz1zeF8x7hCx1ovSPTOE9Ngib74qqUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-escapes": "^4.3.0", + "cli-cursor": "^3.1.0", + "slice-ansi": "^4.0.0", + "wrap-ansi": "^6.2.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/slice-ansi": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", + "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/slice-ansi?sponsor=1" + } + }, + "node_modules/log-update/node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lru-cache": { + "version": "11.2.6", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.6.tgz", + "integrity": "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/lunr": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz", + "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==", + "dev": true, + "license": "MIT" + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true, + "license": "ISC" + }, + "node_modules/map-stream": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/map-stream/-/map-stream-0.1.0.tgz", + "integrity": 
"sha512-CkYQrPYZfWnu/DAmVCpTSX/xHpKZ80eKh2lAkyA6AJTef6bW+6JpbQZN5rofum7da+SyN1bi5ctTm+lTfcCW3g==", + "dev": true + }, + "node_modules/markdown-it": { + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-14.1.1.tgz", + "integrity": "sha512-BuU2qnTti9YKgK5N+IeMubp14ZUKUUw7yeJbkjtosvHiP0AZ5c8IAgEMk79D0eC8F23r4Ac/q8cAIFdm2FtyoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1", + "entities": "^4.4.0", + "linkify-it": "^5.0.0", + "mdurl": "^2.0.0", + "punycode.js": "^2.3.1", + "uc.micro": "^2.1.0" + }, + "bin": { + "markdown-it": "bin/markdown-it.mjs" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mdurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-2.0.0.tgz", + "integrity": "sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==", + "dev": true, + "license": "MIT" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": 
"sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-4.2.1.tgz", + "integrity": "sha512-9Uq1ChtSZO+Mxa/CL1eGizn2vRn3MlLgzhT0Iz8zaY8NdvxvB0d5QdPFmCKf7JKA9Lerx5vRrnwO03jsSfGG9g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, + "node_modules/minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + 
"node_modules/minizlib/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "dev": true, + "license": "MIT", + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mocha": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-9.2.2.tgz", + "integrity": "sha512-L6XC3EdwT6YrIk0yXpavvLkn8h+EU+Y5UcCHKECyMbdUIxyMuZj4bX4U9e1nvnvUUvQVsV2VHQr5zLdcUkhW/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@ungap/promise-all-settled": "1.1.2", + "ansi-colors": "4.1.1", + "browser-stdout": "1.3.1", + "chokidar": "3.5.3", + "debug": "4.3.3", + "diff": "5.0.0", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.2.0", + "growl": "1.10.5", + "he": "1.2.0", + "js-yaml": "4.1.0", + "log-symbols": "4.1.0", + "minimatch": "4.2.1", + "ms": "2.1.3", + "nanoid": "3.3.1", + "serialize-javascript": "6.0.0", + "strip-json-comments": "3.1.1", + "supports-color": "8.1.1", + "which": "2.0.2", + "workerpool": "6.2.0", + "yargs": "16.2.0", + "yargs-parser": "20.2.4", + "yargs-unparser": "2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mochajs" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.1.tgz", + "integrity": "sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw==", + "dev": true, + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": 
"sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ospath": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/ospath/-/ospath-1.2.2.tgz", + "integrity": "sha512-o6E5qJV5zkAbIDNhGSIlyOhScKXgQrSRMilfph0clDfM0nEnBOlKlH4sWDmG95BW/CvwNz0vmm7dJVtU2KlMiA==", + "dev": true, + "license": "MIT" + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": 
"^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-scurry": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.2.tgz", + "integrity": "sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^11.0.0", + "minipass": "^7.1.2" + }, + "engines": { + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/minipass": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.3.tgz", + "integrity": "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/pause-stream": { + "version": "0.0.11", + "resolved": "https://registry.npmjs.org/pause-stream/-/pause-stream-0.0.11.tgz", + "integrity": "sha512-e3FBlXLmN/D1S+zHzanP4E/4Z60oFAa3O051qt1pxa7DEJWKAyil6upYVXCWadEnuoqa4Pkc9oUx9zsxYeRv8A==", + "dev": true, + "license": [ + "MIT", + "Apache2" + ], + "dependencies": { + "through": "~2.3" + } + }, + "node_modules/pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==", + "dev": true, + "license": "MIT" + }, + "node_modules/performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==", + "dev": true, + "license": "MIT" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pretty-bytes": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.6.0.tgz", + "integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/proxy-from-env": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.0.0.tgz", + "integrity": "sha512-F2JHgJQ1iqwnHDcQjVBsq3n/uoaFL+iPW/eAeL7kVxy/2RrWaN4WroKjjvbsoRtv0ftelNyC01bjRhn/bhcf4A==", + "dev": true, + "license": "MIT" + }, + "node_modules/ps-tree": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/ps-tree/-/ps-tree-1.2.0.tgz", + "integrity": "sha512-0VnamPPYHl4uaU/nSFeZZpR21QAWRz+sRv4iW9+v/GS/J5U5iZB5BNN6J0RMoOvdx2gWM2+ZFMIm58q24e4UYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "event-stream": "=3.3.4" + }, + "bin": { + "ps-tree": "bin/ps-tree.js" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/pump": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.4.tgz", + "integrity": 
"sha512-VS7sjc6KR7e1ukRFhQSY5LM2uBWAUPiOPa/A3mkKmiMwSmRFUITt0xuj+/lesgnCv+dPIEYlkzrcyXgquIHMcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/punycode.js": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode.js/-/punycode.js-2.3.1.tgz", + "integrity": "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/qs": { + "version": "6.14.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.2.tgz", + "integrity": "sha512-V/yCWTTF7VJ9hIh18Ugr2zhJMP01MY7c5kh4J870L7imm6/DIzBsNLTXzMwUA3yZ5b/KBqLx8Kp3uRvd7xSe3Q==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/request-progress": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/request-progress/-/request-progress-3.0.0.tgz", + "integrity": "sha512-MnWzEHHaxHO2iWiQuHrUPBi/1WeBf5PkxQqNyNvLl9VAYSdXkP8tQ3pBSeCPD+yw0v0Aq1zosWLz0BdeXpWwZg==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"throttleit": "^1.0.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/rfdc": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.4.1.tgz", + "integrity": "sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==", + "dev": true, + "license": "MIT" + }, + "node_modules/rimraf": { + "version": "6.1.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-6.1.3.tgz", + "integrity": "sha512-LKg+Cr2ZF61fkcaK1UdkH2yEBBKnYjTyWzTJT6KNPcSPaiT7HSdhtMXQuN5wkTX0Xu72KQ1l8S42rlmexS2hSA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "glob": "^13.0.3", + "package-json-from-dist": "^1.0.1" + }, + "bin": { + "rimraf": "dist/esm/bin.mjs" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/balanced-match": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", + "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/rimraf/node_modules/brace-expansion": { + 
"version": "5.0.4", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.4.tgz", + "integrity": "sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^4.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/rimraf/node_modules/glob": { + "version": "13.0.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-13.0.6.tgz", + "integrity": "sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "minimatch": "^10.2.2", + "minipass": "^7.1.3", + "path-scurry": "^2.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/minimatch": { + "version": "10.2.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.4.tgz", + "integrity": "sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "brace-expansion": "^5.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/minipass": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.3.tgz", + "integrity": "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/rxjs": { + "version": "7.8.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "dev": true, + "license": "Apache-2.0", + 
"dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.1.tgz", + "integrity": "sha512-mjn/0bi/oUURjc5Xl7IaWi/OJJJumuoJFQJfDDyO46+hBWsfaVM65TBHq2eoZBhzl9EchxOijpkbRC8SVBQU0w==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + 
"resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/slice-ansi": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-3.0.0.tgz", + "integrity": "sha512-pSyv7bSTC7ig9Dcgbw9AuRNUb5k5V6oDudjZoMBSr13qpLBG7tB+zgCkARjq7xIUgdz5P1Qe8u+rSGdouOOIyQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/split": { + 
"version": "0.3.3", + "resolved": "https://registry.npmjs.org/split/-/split-0.3.3.tgz", + "integrity": "sha512-wD2AeVmxXRBoX44wAycgjVpMhvbwdI2aZjCkvfNcH1YqHQvJVa1duWc73OyVGJUc05fhFaTZeQ/PYsrmyH0JVA==", + "dev": true, + "license": "MIT", + "dependencies": { + "through": "2" + }, + "engines": { + "node": "*" + } + }, + "node_modules/sshpk": { + "version": "1.18.0", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz", + "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + }, + "bin": { + "sshpk-conv": "bin/sshpk-conv", + "sshpk-sign": "bin/sshpk-sign", + "sshpk-verify": "bin/sshpk-verify" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/start-server-and-test": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/start-server-and-test/-/start-server-and-test-2.1.5.tgz", + "integrity": "sha512-A/SbXpgXE25ScSkpLLqvGvVZT0ykN6+AzS8tVqMBCTxbJy2Nwuen59opT+afalK5aS+AuQmZs0EsLwjnuDN+/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "arg": "^5.0.2", + "bluebird": "3.7.2", + "check-more-types": "2.24.0", + "debug": "4.4.3", + "execa": "5.1.1", + "lazy-ass": "1.6.0", + "ps-tree": "1.2.0", + "wait-on": "9.0.4" + }, + "bin": { + "server-test": "src/bin/start.js", + "start-server-and-test": "src/bin/start.js", + "start-test": "src/bin/start.js" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/start-server-and-test/node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/start-server-and-test/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/start-server-and-test/node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/start-server-and-test/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/start-server-and-test/node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/stream-combiner": { + 
"version": "0.0.4", + "resolved": "https://registry.npmjs.org/stream-combiner/-/stream-combiner-0.0.4.tgz", + "integrity": "sha512-rT00SPnTVyRsaSz5zgSPma/aHSOic5U1prhYdRy5HS2kTZviFpmDgzilbtsJsxiroqACmayynDN/9VzIbX5DOw==", + "dev": true, + "license": "MIT", + "dependencies": { + "duplexer": "~0.1.1" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": 
"sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "deprecated": "Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", + "dev": true, + "license": "ISC", + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/throttleit": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-1.0.1.tgz", + "integrity": "sha512-vDZpf9Chs9mAdfY046mcPt8fg5QSZr37hEH4TXYBnDF+izxgrbRGUAAaBvIk/fJm9aOFCGFd1EsNg5AZCbnQCQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tldts": { + "version": "6.1.86", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-6.1.86.tgz", + "integrity": "sha512-WMi/OQ2axVTf/ykqCQgXiIct+mSQDFdH2fkwhPwgEwvJ1kSzZRiinb0zF2Xb8u4+OqPChmyI6MEu4EezNJz+FQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "tldts-core": "^6.1.86" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "6.1.86", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-6.1.86.tgz", + "integrity": "sha512-Je6p7pkk+KMzMv2XXKmAE3McmolOQFdxkKw0R8EYNr7sELW46JqnNeTX8ybPiQgvg1ymCoF8LXs5fzFaZvJPTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tmp": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.5.tgz", + "integrity": "sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.14" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": 
"https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tough-cookie": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-5.1.2.tgz", + "integrity": "sha512-FVDYdxtnj0G6Qm/DhNPSb8Ju59ULcup3tuJxkFb5K8Bv2pUXILbf0xZWU8PX8Ov19OXljbUyveOFwRMwkXzO+A==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tldts": "^6.1.32" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "dev": true, + "license": "MIT" + }, + "node_modules/tree-kill": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz", + "integrity": "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==", + "dev": true, + "license": "MIT", + "bin": { + "tree-kill": "cli.js" + } + }, + "node_modules/ts-mocha": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/ts-mocha/-/ts-mocha-9.0.2.tgz", + "integrity": "sha512-WyQjvnzwrrubl0JT7EC1yWmNpcsU3fOuBFfdps30zbmFBgKniSaSOyZMZx+Wq7kytUs5CY+pEbSYEbGfIKnXTw==", + "dev": true, + "license": "MIT", + "dependencies": { + "ts-node": "7.0.1" + }, + "bin": { + "ts-mocha": "bin/ts-mocha" + }, + "engines": { + "node": ">= 6.X.X" + }, + "optionalDependencies": { + "tsconfig-paths": "^3.5.0" + }, + "peerDependencies": { + "mocha": "^3.X.X || ^4.X.X || ^5.X.X || ^6.X.X || ^7.X.X || ^8.X.X || ^9.X.X" + } + }, + "node_modules/ts-mocha/node_modules/diff": { + "version": "3.5.1", + "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.1.tgz", + 
"integrity": "sha512-Z3u54A8qGyqFOSr2pk0ijYs8mOE9Qz8kTvtKeBI+upoG9j04Sq+oI7W8zAJiQybDcESET8/uIdHzs0p3k4fZlw==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/ts-mocha/node_modules/json5": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz", + "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "minimist": "^1.2.0" + }, + "bin": { + "json5": "lib/cli.js" + } + }, + "node_modules/ts-mocha/node_modules/mkdirp": { + "version": "0.5.6", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", + "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.6" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/ts-mocha/node_modules/ts-node": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-7.0.1.tgz", + "integrity": "sha512-BVwVbPJRspzNh2yfslyT1PSbl5uIk03EZlb493RKHN4qej/D06n1cEhjlOJG69oFsE7OT8XjpTUcYf6pKTLMhw==", + "dev": true, + "license": "MIT", + "dependencies": { + "arrify": "^1.0.0", + "buffer-from": "^1.1.0", + "diff": "^3.1.0", + "make-error": "^1.1.1", + "minimist": "^1.2.0", + "mkdirp": "^0.5.1", + "source-map-support": "^0.5.6", + "yn": "^2.0.0" + }, + "bin": { + "ts-node": "dist/bin.js" + }, + "engines": { + "node": ">=4.2.0" + } + }, + "node_modules/ts-mocha/node_modules/tsconfig-paths": { + "version": "3.15.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz", + "integrity": "sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@types/json5": "^0.0.29", + "json5": "^1.0.2", + "minimist": 
"^1.2.6", + "strip-bom": "^3.0.0" + } + }, + "node_modules/ts-mocha/node_modules/yn": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yn/-/yn-2.0.0.tgz", + "integrity": "sha512-uTv8J/wiWTgUTg+9vLTi//leUl5vDQS6uii/emeTb2ssY7vl6QWf2fFbIIGjnhjvbdKlU0ed7QPgY1htTC86jQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/ts-node/node_modules/diff": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.4.tgz", + "integrity": "sha512-X07nttJQkwkfKfvTPG/KSnE2OMdcUCao6+eXF3wmnIQRn2aPAHH3VxDbDOdegkd6JbPsXqShpvEOHfAT+nCNwQ==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/tsconfig-paths": { + "version": "4.2.0", + "resolved": 
"https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-4.2.0.tgz", + "integrity": "sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==", + "dev": true, + "license": "MIT", + "dependencies": { + "json5": "^2.2.2", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD" + }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==", + "dev": true, + "license": "Unlicense" + }, + "node_modules/type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=8" + } + }, + "node_modules/typedoc": { + "version": "0.28.17", + "resolved": "https://registry.npmjs.org/typedoc/-/typedoc-0.28.17.tgz", + "integrity": "sha512-ZkJ2G7mZrbxrKxinTQMjFqsCoYY6a5Luwv2GKbTnBCEgV2ihYm5CflA9JnJAwH0pZWavqfYxmDkFHPt4yx2oDQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@gerrit0/mini-shiki": "^3.17.0", + "lunr": "^2.3.9", + 
"markdown-it": "^14.1.0", + "minimatch": "^9.0.5", + "yaml": "^2.8.1" + }, + "bin": { + "typedoc": "bin/typedoc" + }, + "engines": { + "node": ">= 18", + "pnpm": ">= 10" + }, + "peerDependencies": { + "typescript": "5.0.x || 5.1.x || 5.2.x || 5.3.x || 5.4.x || 5.5.x || 5.6.x || 5.7.x || 5.8.x || 5.9.x" + } + }, + "node_modules/typedoc-plugin-markdown": { + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/typedoc-plugin-markdown/-/typedoc-plugin-markdown-4.10.0.tgz", + "integrity": "sha512-psrg8Rtnv4HPWCsoxId+MzEN8TVK5jeKCnTbnGAbTBqcDapR9hM41bJT/9eAyKn9C2MDG9Qjh3MkltAYuLDoXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "typedoc": "0.28.x" + } + }, + "node_modules/typedoc/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/typedoc/node_modules/minimatch": { + "version": "9.0.9", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.9.tgz", + "integrity": "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.2" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uc.micro": { + "version": 
"2.1.0", + "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-2.1.0.tgz", + "integrity": "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==", + "dev": true, + "license": "MIT" + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/untildify": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/untildify/-/untildify-4.0.0.tgz", + "integrity": "sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "dev": true, + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true, + "license": "MIT" + }, + "node_modules/valibot": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/valibot/-/valibot-1.2.0.tgz", + "integrity": 
"sha512-mm1rxUsmOxzrwnX5arGS+U4T25RdvpPjPN4yR0u9pUBov9+zGVtO84tif1eY4r6zWxVxu3KzIyknJy3rxfRZZg==", + "license": "MIT", + "peer": true, + "peerDependencies": { + "typescript": ">=5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", + "dev": true, + "engines": [ + "node >=0.6.0" + ], + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "node_modules/wait-on": { + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-9.0.4.tgz", + "integrity": "sha512-k8qrgfwrPVJXTeFY8tl6BxVHiclK11u72DVKhpybHfUL/K6KM4bdyK9EhIVYGytB5MJe/3lq4Tf0hrjM+pvJZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "axios": "^1.13.5", + "joi": "^18.0.2", + "lodash": "^4.17.23", + "minimist": "^1.2.8", + "rxjs": "^7.8.2" + }, + "bin": { + "wait-on": "bin/wait-on" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/wasm-opt": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/wasm-opt/-/wasm-opt-1.4.0.tgz", + "integrity": "sha512-wIsxxp0/FOSphokH4VOONy1zPkVREQfALN+/JTvJPK8gFSKbsmrcfECu2hT7OowqPfb4WEMSMceHgNL0ipFRyw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "node-fetch": "^2.6.9", + "tar": "^6.1.13" + }, + "bin": { + "wasm-opt": "bin/wasm-opt.js" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": 
"https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/workerpool": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.2.0.tgz", + "integrity": "sha512-Rsk5qQHJ9eowMH28Jwhe8HEbmdYDX4lwoMWshiCXugjtHqMD9ZbiqSDLxcsfdqsETPzVUtX5s1Z5kStiIM6l4A==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, 
+ "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/yaml": { + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz", + "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", + "dev": true, + "license": "ISC", + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14.6" + }, + "funding": { + "url": "https://github.com/sponsors/eemeli" + } + }, + "node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.4", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", + "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "license": "MIT", + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": 
"^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yauzl": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", + "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-crc32": "~0.2.3", + "fd-slicer": "~1.1.0" + } + }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/bindings/wasm/audit_trail_wasm/package.json b/bindings/wasm/audit_trail_wasm/package.json new file mode 100644 index 00000000..d8efcb5f --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/package.json @@ -0,0 +1,73 @@ +{ + "name": "@iota/audit-trail", + "author": "IOTA Foundation ", + "description": "WASM bindings for IOTA Audit Trail. 
To be used in JavaScript/TypeScript.", + "homepage": "https://www.iota.org", + "version": "0.1.0-alpha", + "license": "Apache-2.0", + "repository": { + "type": "git", + "url": "git+https://github.com/iotaledger/notarization.git" + }, + "directories": { + "example": "examples" + }, + "scripts": { + "build:src": "cargo build --lib --release --target wasm32-unknown-unknown --target-dir ../target", + "build:src:nodejs": "cargo build --lib --release --target wasm32-unknown-unknown --target-dir ../target", + "prebundle:nodejs": "rimraf node", + "bundle:nodejs": "wasm-bindgen ../target/wasm32-unknown-unknown/release/audit_trail_wasm.wasm --typescript --weak-refs --target nodejs --out-dir node && node ../build/node audit_trail_wasm && tsc --project ./lib/tsconfig.json && node ../build/replace_paths ./lib/tsconfig.json node audit_trail_wasm", + "prebundle:web": "rimraf web", + "bundle:web": "wasm-bindgen ../target/wasm32-unknown-unknown/release/audit_trail_wasm.wasm --typescript --target web --out-dir web && node ../build/web audit_trail_wasm && tsc --project ./lib/tsconfig.web.json && node ../build/replace_paths ./lib/tsconfig.web.json web audit_trail_wasm", + "build:nodejs": "npm run build:src:nodejs && npm run bundle:nodejs && wasm-opt -O node/audit_trail_wasm_bg.wasm -o node/audit_trail_wasm_bg.wasm", + "build:web": "npm run build:src && npm run bundle:web && wasm-opt -O web/audit_trail_wasm_bg.wasm -o web/audit_trail_wasm_bg.wasm", + "build:docs": "typedoc && npm run fix_docs", + "build:examples:web": "tsc --project ./examples/tsconfig.web.json && node ../build/replace_paths ./tsconfig.web.json dist audit_trail_wasm/examples resolve", + "build": "npm run build:web && npm run build:nodejs && npm run build:docs", + "example:node": "ts-node --project ./examples/tsconfig.node.json -r tsconfig-paths/register ./examples/src/main.ts", + "test": "npm run test:node", + "test:node": "ts-mocha -r tsconfig-paths/register -p ./examples/tsconfig.node.json ./examples/src/tests.ts 
--parallel --jobs 4 --retries 3 --timeout 180000 --exit", + "test:browser": "start-server-and-test example:web http://0.0.0.0:5173 'cypress run --headless'", + "test:browser:firefox": "start-server-and-test example:web http://0.0.0.0:5173 'cypress run --headless --browser firefox'", + "test:browser:chrome": "start-server-and-test example:web http://0.0.0.0:5173 'cypress run --headless --browser chrome'", + "example:web": "npm i --prefix ./cypress/app/ && npm run dev --prefix ./cypress/app/ -- --host", + "cypress": "cypress open", + "fmt": "dprint fmt", + "fix_docs": "find ./docs/wasm/ -type f -name '*.md' -exec sed -E -i.bak -e 's/(\\.md?#([^#]*)?)#/\\1/' {} ';' -exec rm {}.bak ';'" + }, + "publishConfig": { + "access": "public" + }, + "files": [ + "web/*", + "node/*" + ], + "devDependencies": { + "@types/mocha": "^9.1.0", + "@types/node": "^22.0.0", + "cypress": "^14.2.0", + "dprint": "^0.33.0", + "mocha": "^9.2.0", + "rimraf": "^6.0.1", + "start-server-and-test": "^2.0.11", + "ts-mocha": "^9.0.2", + "ts-node": "^10.9.2", + "tsconfig-paths": "^4.1.0", + "typedoc": "^0.28.5", + "typedoc-plugin-markdown": "^4.4.1", + "typescript": "^5.7.3", + "wasm-opt": "^1.4.0" + }, + "dependencies": { + "@iota/iota-interaction-ts": "^0.12.0" + }, + "peerDependencies": { + "@iota/iota-sdk": "^1.11.0" + }, + "config": { + "CYPRESS_VERIFY_TIMEOUT": 100000 + }, + "engines": { + "node": ">=20" + } +} diff --git a/bindings/wasm/audit_trail_wasm/rust-toolchain.toml b/bindings/wasm/audit_trail_wasm/rust-toolchain.toml new file mode 100644 index 00000000..825d39b5 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/rust-toolchain.toml @@ -0,0 +1,5 @@ +[toolchain] +channel = "stable" +components = ["rustfmt"] +targets = ["wasm32-unknown-unknown"] +profile = "minimal" diff --git a/bindings/wasm/audit_trail_wasm/src/builder.rs b/bindings/wasm/audit_trail_wasm/src/builder.rs new file mode 100644 index 00000000..0db57672 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/src/builder.rs @@ -0,0 
+1,73 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use audit_trail::core::builder::AuditTrailBuilder; +use iota_interaction_ts::wasm_error::Result; +use product_common::bindings::transaction::WasmTransactionBuilder; +use product_common::bindings::utils::{into_transaction_builder, parse_wasm_iota_address}; +use product_common::bindings::WasmIotaAddress; +use wasm_bindgen::prelude::*; + +use crate::trail::WasmCreateTrail; +use crate::types::WasmLockingConfig; + +/// Trail-creation builder exposed to wasm consumers. +#[wasm_bindgen(js_name = AuditTrailBuilder, inspectable)] +pub struct WasmAuditTrailBuilder(pub(crate) AuditTrailBuilder); + +#[wasm_bindgen(js_class = AuditTrailBuilder)] +impl WasmAuditTrailBuilder { + /// Sets the initial record using a UTF-8 string payload. + #[wasm_bindgen(js_name = withInitialRecordString)] + pub fn with_initial_record_string(self, data: String, metadata: Option, tag: Option) -> Self { + Self(self.0.with_initial_record_parts(data, metadata, tag)) + } + + /// Sets the initial record using raw bytes. + #[wasm_bindgen(js_name = withInitialRecordBytes)] + pub fn with_initial_record_bytes( + self, + data: js_sys::Uint8Array, + metadata: Option, + tag: Option, + ) -> Self { + Self(self.0.with_initial_record_parts(data.to_vec(), metadata, tag)) + } + + /// Sets immutable metadata for the trail. + #[wasm_bindgen(js_name = withTrailMetadata)] + pub fn with_trail_metadata(self, name: String, description: Option) -> Self { + Self(self.0.with_trail_metadata_parts(name, description)) + } + + /// Sets mutable metadata for the trail. + #[wasm_bindgen(js_name = withUpdatableMetadata)] + pub fn with_updatable_metadata(self, metadata: String) -> Self { + Self(self.0.with_updatable_metadata(metadata)) + } + + /// Sets the locking configuration for the trail. 
+ #[wasm_bindgen(js_name = withLockingConfig)] + pub fn with_locking_config(self, config: WasmLockingConfig) -> Self { + Self(self.0.with_locking_config(config.into())) + } + + /// Sets the canonical list of record tags owned by the trail. + #[wasm_bindgen(js_name = withRecordTags)] + pub fn with_record_tags(self, tags: Vec) -> Self { + Self(self.0.with_record_tags(tags)) + } + + /// Sets the initial admin address. + #[wasm_bindgen(js_name = withAdmin)] + pub fn with_admin(self, admin: WasmIotaAddress) -> Result { + let admin = parse_wasm_iota_address(&admin)?; + Ok(Self(self.0.with_admin(admin))) + } + + /// Finalizes the builder into a transaction wrapper. + #[wasm_bindgen(unchecked_return_type = "TransactionBuilder")] + pub fn finish(self) -> Result { + Ok(into_transaction_builder(WasmCreateTrail::new(self))) + } +} diff --git a/bindings/wasm/audit_trail_wasm/src/client.rs b/bindings/wasm/audit_trail_wasm/src/client.rs new file mode 100644 index 00000000..acc65f13 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/src/client.rs @@ -0,0 +1,180 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::anyhow; +use audit_trail::{AuditTrailClient, AuditTrailClientReadOnly, PackageOverrides}; +use iota_interaction_ts::bindings::{WasmIotaClient, WasmPublicKey, WasmTransactionSigner}; +use iota_interaction_ts::wasm_error::{wasm_error, Result, WasmResult}; +use product_common::bindings::utils::parse_wasm_object_id; +use product_common::bindings::WasmObjectID; +use product_common::core_client::{CoreClient, CoreClientReadOnly}; +use wasm_bindgen::prelude::*; + +use crate::builder::WasmAuditTrailBuilder; +use crate::client_read_only::{WasmAuditTrailClientReadOnly, WasmPackageOverrides}; +use crate::trail_handle::WasmAuditTrailHandle; + +/// Signing audit-trail client exposed to wasm consumers. 
+/// +/// This wraps the read-only client with a transaction signer so JS/TS consumers can build typed +/// write transactions while keeping submission and execution outside the SDK. +#[derive(Clone)] +#[wasm_bindgen(js_name = AuditTrailClient)] +pub struct WasmAuditTrailClient(pub(crate) AuditTrailClient); + +#[wasm_bindgen(js_class = AuditTrailClient)] +impl WasmAuditTrailClient { + /// Creates a signing client from an existing read-only client and signer. + #[wasm_bindgen(js_name = create)] + pub async fn new( + client: WasmAuditTrailClientReadOnly, + signer: WasmTransactionSigner, + ) -> Result { + let client = AuditTrailClient::new(client.0, signer).await.wasm_result()?; + Ok(Self(client)) + } + + /// Creates a signing client directly from an IOTA client and signer. + /// + /// Pass `package_id` when connecting to a custom deployment that is not known to the package + /// registry. + #[wasm_bindgen(js_name = createFromIotaClient)] + pub async fn create_from_iota_client( + iota_client: WasmIotaClient, + signer: WasmTransactionSigner, + package_id: Option, + ) -> Result { + let read_only = if let Some(package_id) = package_id { + let package_id = parse_wasm_object_id(&package_id)?; + AuditTrailClientReadOnly::new_with_package_overrides( + iota_client, + PackageOverrides { + audit_trail: Some(package_id), + tf_component: None, + }, + ) + .await + .wasm_result()? + } else { + AuditTrailClientReadOnly::new(iota_client).await.wasm_result()? + }; + + let client = AuditTrailClient::new(read_only, signer).await.wasm_result()?; + Ok(Self(client)) + } + + /// Creates a signing client directly from an IOTA client, signer, and full package overrides. 
+ #[wasm_bindgen(js_name = createFromIotaClientWithPackageOverrides)] + pub async fn create_from_iota_client_with_package_overrides( + iota_client: WasmIotaClient, + signer: WasmTransactionSigner, + package_overrides: Option, + ) -> Result { + let read_only = if let Some(package_overrides) = package_overrides { + let package_overrides = PackageOverrides::try_from(package_overrides)?; + AuditTrailClientReadOnly::new_with_package_overrides(iota_client, package_overrides) + .await + .wasm_result()? + } else { + AuditTrailClientReadOnly::new(iota_client).await.wasm_result()? + }; + + let client = AuditTrailClient::new(read_only, signer).await.wasm_result()?; + Ok(Self(client)) + } + + /// Returns the sender public key associated with the signer. + #[wasm_bindgen(js_name = senderPublicKey)] + pub fn sender_public_key(&self) -> Result { + self.0.public_key().try_into() + } + + /// Returns the sender address associated with the signer. + #[wasm_bindgen(js_name = senderAddress)] + pub fn sender_address(&self) -> String { + self.0.address().to_string() + } + + /// Returns the connected network name. + #[wasm_bindgen] + pub fn network(&self) -> String { + self.0.network().to_string() + } + + /// Returns the connected chain ID. + #[wasm_bindgen(js_name = chainId)] + pub fn chain_id(&self) -> String { + self.0.chain_id().to_string() + } + + /// Returns the audit-trail package ID used by this client. + #[wasm_bindgen(js_name = packageId)] + pub fn package_id(&self) -> String { + self.0.package_id().to_string() + } + + /// Returns the `tf_components` package ID used by this client. + #[wasm_bindgen(js_name = tfComponentsPackageId)] + pub fn tf_components_package_id(&self) -> String { + self.0.tf_components_package_id().to_string() + } + + /// Returns the resolved audit-trail package history as stringified object IDs. 
+ #[wasm_bindgen(js_name = packageHistory)] + pub fn package_history(&self) -> Vec { + self.0 + .package_history() + .into_iter() + .map(|pkg_id| pkg_id.to_string()) + .collect() + } + + /// Returns the underlying IOTA client wrapper. + #[wasm_bindgen(js_name = iotaClient)] + pub fn iota_client(&self) -> WasmIotaClient { + self.0.read_only().iota_client().clone().into_inner() + } + + /// Returns the signer used by this client. + #[wasm_bindgen] + pub fn signer(&self) -> WasmTransactionSigner { + self.0.signer().clone() + } + + /// Replaces the signer used by this client. + #[wasm_bindgen(js_name = withSigner)] + pub async fn with_signer(self, signer: WasmTransactionSigner) -> Result { + let client = self + .0 + .with_signer(signer) + .await + .map_err(|err| wasm_error(anyhow!(err.to_string())))?; + Ok(Self(client)) + } + + /// Returns the read-only view of this client. + /// + /// This is useful when a caller wants to pass the client into code that only needs read + /// capabilities. + #[wasm_bindgen(js_name = readOnly)] + pub fn read_only(&self) -> WasmAuditTrailClientReadOnly { + WasmAuditTrailClientReadOnly(self.0.read_only().clone()) + } + + /// Creates a builder for a new audit trail. + /// + /// The builder is pre-populated with the signer address as the initial admin when available. + #[wasm_bindgen(js_name = createTrail)] + pub fn create_trail(&self) -> WasmAuditTrailBuilder { + WasmAuditTrailBuilder(self.0.create_trail()) + } + + /// Returns a trail-scoped handle for the given trail object ID. + /// + /// Creating the handle is cheap. Network reads and transaction building happen on the returned + /// handle and its subsystem wrappers. 
+ pub fn trail(&self, trail_id: WasmObjectID) -> Result { + let trail_id = parse_wasm_object_id(&trail_id)?; + Ok(WasmAuditTrailHandle::from_full(self.0.clone(), trail_id)) + } +} diff --git a/bindings/wasm/audit_trail_wasm/src/client_read_only.rs b/bindings/wasm/audit_trail_wasm/src/client_read_only.rs new file mode 100644 index 00000000..77d9db49 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/src/client_read_only.rs @@ -0,0 +1,167 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use audit_trail::{AuditTrailClientReadOnly, PackageOverrides}; +use iota_interaction_ts::bindings::WasmIotaClient; +use iota_interaction_ts::wasm_error::{Result, WasmResult}; +use product_common::bindings::utils::parse_wasm_object_id; +use product_common::bindings::WasmObjectID; +use product_common::core_client::CoreClientReadOnly; +use wasm_bindgen::prelude::*; + +use crate::trail_handle::WasmAuditTrailHandle; + +/// Package-ID overrides exposed to JavaScript and TypeScript consumers. +#[derive(Clone)] +#[wasm_bindgen(js_name = PackageOverrides, getter_with_clone, inspectable)] +pub struct WasmPackageOverrides { + /// Override for the audit-trail package ID. + #[wasm_bindgen(js_name = auditTrailPackageId)] + pub audit_trail_package_id: Option, + /// Override for the `tf_components` package ID. + #[wasm_bindgen(js_name = tfComponentsPackageId)] + pub tf_components_package_id: Option, +} + +#[wasm_bindgen(js_class = PackageOverrides)] +impl WasmPackageOverrides { + /// Creates package overrides for custom deployments. 
+ #[wasm_bindgen(constructor)] + pub fn new( + audit_trail_package_id: Option, + tf_components_package_id: Option, + ) -> WasmPackageOverrides { + Self { + audit_trail_package_id, + tf_components_package_id, + } + } +} + +impl TryFrom for PackageOverrides { + type Error = JsValue; + + fn try_from(value: WasmPackageOverrides) -> std::result::Result { + Ok(Self { + audit_trail: value + .audit_trail_package_id + .as_ref() + .map(parse_wasm_object_id) + .transpose()?, + tf_component: value + .tf_components_package_id + .as_ref() + .map(parse_wasm_object_id) + .transpose()?, + }) + } +} + +/// Read-only audit-trail client exposed to wasm consumers. +/// +/// This is the main JS/TS entry point for package resolution and typed reads. Use [`Self::trail`] +/// to get an [`AuditTrailHandle`](crate::trail_handle::WasmAuditTrailHandle) bound to one trail +/// object. +#[derive(Clone)] +#[wasm_bindgen(js_name = AuditTrailClientReadOnly)] +pub struct WasmAuditTrailClientReadOnly(pub(crate) AuditTrailClientReadOnly); + +#[wasm_bindgen(js_class = AuditTrailClientReadOnly)] +impl WasmAuditTrailClientReadOnly { + /// Creates a read-only client by resolving package IDs from the connected network. + /// + /// This is the recommended constructor for official deployments tracked by the built-in + /// package registry. + #[wasm_bindgen(js_name = create)] + pub async fn new(iota_client: WasmIotaClient) -> Result { + let client = AuditTrailClientReadOnly::new(iota_client).await.wasm_result()?; + Ok(Self(client)) + } + + /// Creates a read-only client with explicit package overrides. + /// + /// Prefer this when your JS/TS app talks to a local deployment, preview environment, or any + /// package pair that is not yet part of the registry baked into the SDK. 
+ #[wasm_bindgen(js_name = createWithPackageOverrides)] + pub async fn new_with_package_overrides( + iota_client: WasmIotaClient, + package_overrides: WasmPackageOverrides, + ) -> Result { + let package_overrides = PackageOverrides::try_from(package_overrides)?; + let client = AuditTrailClientReadOnly::new_with_package_overrides(iota_client, package_overrides) + .await + .wasm_result()?; + Ok(Self(client)) + } + + /// Creates a read-only client while overriding only the audit-trail package ID. + /// + /// This is a compatibility helper for existing callers that only need a single package + /// override. + #[wasm_bindgen(js_name = createWithPkgId)] + pub async fn new_with_pkg_id( + iota_client: WasmIotaClient, + package_id: WasmObjectID, + ) -> Result { + let package_id = parse_wasm_object_id(&package_id)?; + let client = AuditTrailClientReadOnly::new_with_package_overrides( + iota_client, + PackageOverrides { + audit_trail: Some(package_id), + tf_component: None, + }, + ) + .await + .wasm_result()?; + Ok(Self(client)) + } + + /// Returns the audit-trail package ID used by this client. + #[wasm_bindgen(js_name = packageId)] + pub fn package_id(&self) -> String { + self.0.package_id().to_string() + } + + /// Returns the `tf_components` package ID used by this client. + #[wasm_bindgen(js_name = tfComponentsPackageId)] + pub fn tf_components_package_id(&self) -> String { + self.0.tf_components_package_id().to_string() + } + + /// Returns the resolved audit-trail package history as stringified object IDs. + #[wasm_bindgen(js_name = packageHistory)] + pub fn package_history(&self) -> Vec { + self.0 + .package_history() + .into_iter() + .map(|pkg_id| pkg_id.to_string()) + .collect() + } + + /// Returns the connected network name. + #[wasm_bindgen] + pub fn network(&self) -> String { + self.0.network().to_string() + } + + /// Returns the connected chain ID. 
+  #[wasm_bindgen(js_name = chainId)]
+  pub fn chain_id(&self) -> String {
+    self.0.chain_id().to_string()
+  }
+
+  /// Returns the underlying IOTA client wrapper.
+  #[wasm_bindgen(js_name = iotaClient)]
+  pub fn iota_client(&self) -> WasmIotaClient {
+    self.0.iota_client().clone().into_inner()
+  }
+
+  /// Returns a trail-scoped handle for the given trail object ID.
+  ///
+  /// Creating the handle is cheap. Reads only happen when you call methods on the returned
+  /// handle.
+  pub fn trail(&self, trail_id: WasmObjectID) -> Result<WasmAuditTrailHandle> {
+    let trail_id = parse_wasm_object_id(&trail_id)?;
+    Ok(WasmAuditTrailHandle::from_read_only(self.0.clone(), trail_id))
+  }
+}
diff --git a/bindings/wasm/audit_trail_wasm/src/lib.rs b/bindings/wasm/audit_trail_wasm/src/lib.rs
new file mode 100644
index 00000000..8e19f317
--- /dev/null
+++ b/bindings/wasm/audit_trail_wasm/src/lib.rs
@@ -0,0 +1,41 @@
+// Copyright 2026 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+#![doc = include_str!("../README.md")]
+#![warn(rustdoc::all)]
+#![allow(deprecated)]
+#![allow(clippy::upper_case_acronyms)]
+#![allow(clippy::drop_non_drop)]
+#![allow(clippy::unused_unit)]
+#![allow(clippy::await_holding_refcell_ref)]
+
+use wasm_bindgen::prelude::*;
+use wasm_bindgen::JsValue;
+
+pub(crate) mod builder;
+pub(crate) mod client;
+pub(crate) mod client_read_only;
+mod trail;
+pub(crate) mod trail_handle;
+pub(crate) mod types;
+
+/// Shared wasm bindings re-exported from `product_common`.
+pub use product_common::bindings::*;
+
+/// Installs the panic hook used by the wasm bindings.
+#[wasm_bindgen(start)]
+pub fn start() -> std::result::Result<(), JsValue> {
+  console_error_panic_hook::set_once(); // set_once is idempotent: safe if the host calls start() more than once
+  Ok(())
+}
+
+#[wasm_bindgen(typescript_custom_section)] // splices the raw TS below verbatim into the generated .d.ts
+const CUSTOM_IMPORTS: &str = r#"
+import {
+  Transaction,
+  TransactionOutput,
+  TransactionBuilder,
+  CoreClient,
+  CoreClientReadOnly
+} from '../lib/index';
+"#;
diff --git a/bindings/wasm/audit_trail_wasm/src/trail.rs b/bindings/wasm/audit_trail_wasm/src/trail.rs
new file mode 100644
index 00000000..dc9c995d
--- /dev/null
+++ b/bindings/wasm/audit_trail_wasm/src/trail.rs
@@ -0,0 +1,637 @@
+// Copyright 2026 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+use audit_trail::core::access::{
+  CleanupRevokedCapabilities, CreateRole, DeleteRole, DestroyCapability, DestroyInitialAdminCapability,
+  IssueCapability, RevokeCapability, RevokeInitialAdminCapability, UpdateRole,
+};
+use audit_trail::core::create::{CreateTrail, TrailCreated};
+use audit_trail::core::locking::{
+  UpdateDeleteRecordWindow, UpdateDeleteTrailLock, UpdateLockingConfig, UpdateWriteLock,
+};
+use audit_trail::core::records::{AddRecord, DeleteRecord, DeleteRecordsBatch};
+use audit_trail::core::tags::{AddRecordTag, RemoveRecordTag};
+use audit_trail::core::trail::{DeleteAuditTrail, Migrate, UpdateMetadata};
+use audit_trail::core::types::{
+  AuditTrailDeleted, CapabilityDestroyed, CapabilityIssued, CapabilityRevoked, OnChainAuditTrail, RecordAdded,
+  RecordDeleted, RoleCreated, RoleDeleted, RoleUpdated,
+};
+use iota_interaction_ts::bindings::{WasmIotaTransactionBlockEffects, WasmIotaTransactionBlockEvents};
+use iota_interaction_ts::core_client::WasmCoreClientReadOnly;
+use iota_interaction_ts::wasm_error::{Result, WasmResult};
+use product_common::bindings::core_client::WasmManagedCoreClientReadOnly;
+use product_common::bindings::utils::{apply_with_events, build_programmable_transaction};
+use wasm_bindgen::prelude::*;
+
+use crate::builder::WasmAuditTrailBuilder;
+use crate::types::{
+  WasmAuditTrailDeleted,
WasmCapabilityDestroyed, WasmCapabilityIssued, WasmCapabilityRevoked, WasmEmpty,
+  WasmImmutableMetadata, WasmLinkedTable, WasmLockingConfig, WasmRecordAdded, WasmRecordDeleted, WasmRecordTagEntry,
+  WasmRoleCreated, WasmRoleDeleted, WasmRoleMap, WasmRoleUpdated,
+};
+
+/// Read-only view of an on-chain audit trail for wasm consumers.
+#[wasm_bindgen(js_name = OnChainAuditTrail, inspectable)]
+#[derive(Clone)]
+pub struct WasmOnChainAuditTrail(pub(crate) OnChainAuditTrail);
+
+#[wasm_bindgen(js_class = OnChainAuditTrail)]
+impl WasmOnChainAuditTrail {
+  pub(crate) fn new(trail: OnChainAuditTrail) -> Self {
+    Self(trail)
+  }
+
+  /// Returns the trail object ID.
+  #[wasm_bindgen(getter)]
+  pub fn id(&self) -> String {
+    self.0.id.id.to_string()
+  }
+
+  /// Returns the creator address.
+  #[wasm_bindgen(getter)]
+  pub fn creator(&self) -> String {
+    self.0.creator.to_string()
+  }
+
+  /// Returns the creation timestamp in milliseconds.
+  #[wasm_bindgen(js_name = createdAt, getter)]
+  pub fn created_at(&self) -> u64 {
+    self.0.created_at
+  }
+
+  /// Returns the current record sequence counter.
+  #[wasm_bindgen(js_name = sequenceNumber, getter)]
+  pub fn sequence_number(&self) -> u64 {
+    self.0.sequence_number
+  }
+
+  /// Returns the active locking configuration.
+  #[wasm_bindgen(js_name = lockingConfig, getter)]
+  pub fn locking_config(&self) -> WasmLockingConfig {
+    self.0.locking_config.clone().into()
+  }
+
+  /// Returns the record linked-table metadata.
+  #[wasm_bindgen(getter)]
+  pub fn records(&self) -> WasmLinkedTable {
+    self.0.records.clone().into()
+  }
+
+  /// Returns the trail-owned record tags together with usage counts.
+  #[wasm_bindgen(getter)]
+  pub fn tags(&self) -> Vec<WasmRecordTagEntry> {
+    // sorted for a stable, deterministic ordering on the JS side
+    let mut tags: Vec<WasmRecordTagEntry> = self
+      .0
+      .tags
+      .iter()
+      .map(|(tag, usage_count)| (tag.clone(), *usage_count).into())
+      .collect();
+    tags.sort_unstable_by(|left, right| left.tag.cmp(&right.tag));
+    tags
+  }
+
+  /// Returns the trail role map.
+  #[wasm_bindgen(getter)]
+  pub fn roles(&self) -> WasmRoleMap {
+    self.0.roles.clone().into()
+  }
+
+  /// Returns immutable metadata when present.
+  #[wasm_bindgen(js_name = immutableMetadata, getter)]
+  pub fn immutable_metadata(&self) -> Option<WasmImmutableMetadata> {
+    self.0.immutable_metadata.clone().map(Into::into)
+  }
+
+  /// Returns mutable metadata when present.
+  #[wasm_bindgen(js_name = updatableMetadata, getter)]
+  pub fn updatable_metadata(&self) -> Option<String> { // NOTE(review): field is returned as-is; assumed String — confirm
+    self.0.updatable_metadata.clone()
+  }
+
+  /// Returns the on-chain version of the trail object.
+  #[wasm_bindgen(getter)]
+  pub fn version(&self) -> u64 {
+    self.0.version
+  }
+}
+
+impl From<OnChainAuditTrail> for WasmOnChainAuditTrail {
+  fn from(value: OnChainAuditTrail) -> Self {
+    Self::new(value)
+  }
+}
+
+// Shared tail for trail creation: decode the TrailCreated event, then fetch the created object.
+async fn apply_trail_created(
+  tx: CreateTrail,
+  wasm_effects: &WasmIotaTransactionBlockEffects,
+  wasm_events: &WasmIotaTransactionBlockEvents,
+  client: &WasmCoreClientReadOnly,
+) -> Result<WasmOnChainAuditTrail> {
+  let managed_client = WasmManagedCoreClientReadOnly::from_wasm(client)?;
+  let created: TrailCreated = apply_with_events(tx, wasm_effects, wasm_events, client).await?;
+  let trail = created.fetch_audit_trail(&managed_client).await.wasm_result()?;
+  Ok(trail.into())
+}
+
+/// Transaction wrapper for trail creation.
+#[wasm_bindgen(js_name = CreateTrail, inspectable)]
+pub struct WasmCreateTrail(pub(crate) CreateTrail);
+
+#[wasm_bindgen(js_class = CreateTrail)]
+impl WasmCreateTrail {
+  /// Creates a transaction wrapper from an [`AuditTrailBuilder`](crate::builder::WasmAuditTrailBuilder).
+  #[wasm_bindgen(constructor)]
+  pub fn new(builder: WasmAuditTrailBuilder) -> Self {
+    Self(CreateTrail::new(builder.0))
+  }
+
+  /// Builds the programmable transaction bytes for submission.
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  /// Applies transaction effects and events and then fetches the created trail object.
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmOnChainAuditTrail> {
+    apply_trail_created(self.0, wasm_effects, wasm_events, client).await
+  }
+}
+
+/// Transaction wrapper for mutable-metadata updates.
+#[wasm_bindgen(js_name = UpdateMetadata, inspectable)]
+pub struct WasmUpdateMetadata(pub(crate) UpdateMetadata);
+
+#[wasm_bindgen(js_class = UpdateMetadata)]
+impl WasmUpdateMetadata {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmEmpty> { // NOTE(review): pass-through return reconstructed as WasmEmpty — confirm against crate API
+    apply_with_events(self.0, wasm_effects, wasm_events, client).await
+  }
+}
+
+/// Transaction wrapper for trail migration.
+#[wasm_bindgen(js_name = Migrate, inspectable)]
+pub struct WasmMigrate(pub(crate) Migrate);
+
+#[wasm_bindgen(js_class = Migrate)]
+impl WasmMigrate {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmEmpty> { // NOTE(review): pass-through return reconstructed as WasmEmpty — confirm
+    apply_with_events(self.0, wasm_effects, wasm_events, client).await
+  }
+}
+
+/// Transaction wrapper for deleting a trail.
+#[wasm_bindgen(js_name = DeleteAuditTrail, inspectable)]
+pub struct WasmDeleteAuditTrail(pub(crate) DeleteAuditTrail);
+
+#[wasm_bindgen(js_class = DeleteAuditTrail)]
+impl WasmDeleteAuditTrail {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmAuditTrailDeleted> {
+    let event: AuditTrailDeleted = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(event.into())
+  }
+}
+
+/// Transaction wrapper for replacing the full locking configuration.
+#[wasm_bindgen(js_name = UpdateLockingConfig, inspectable)]
+pub struct WasmUpdateLockingConfig(pub(crate) UpdateLockingConfig);
+
+#[wasm_bindgen(js_class = UpdateLockingConfig)]
+impl WasmUpdateLockingConfig {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmEmpty> { // NOTE(review): pass-through return reconstructed as WasmEmpty — confirm
+    apply_with_events(self.0, wasm_effects, wasm_events, client).await
+  }
+}
+
+/// Transaction wrapper for updating the delete-record window.
+#[wasm_bindgen(js_name = UpdateDeleteRecordWindow, inspectable)]
+pub struct WasmUpdateDeleteRecordWindow(pub(crate) UpdateDeleteRecordWindow);
+
+#[wasm_bindgen(js_class = UpdateDeleteRecordWindow)]
+impl WasmUpdateDeleteRecordWindow {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmEmpty> {
+    apply_with_events(self.0, wasm_effects, wasm_events, client).await
+  }
+}
+
+/// Transaction wrapper for updating the delete-trail lock.
+#[wasm_bindgen(js_name = UpdateDeleteTrailLock, inspectable)]
+pub struct WasmUpdateDeleteTrailLock(pub(crate) UpdateDeleteTrailLock);
+
+#[wasm_bindgen(js_class = UpdateDeleteTrailLock)]
+impl WasmUpdateDeleteTrailLock {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmEmpty> { // NOTE(review): pass-through return reconstructed as WasmEmpty — confirm
+    apply_with_events(self.0, wasm_effects, wasm_events, client).await
+  }
+}
+
+/// Transaction wrapper for updating the write lock.
+#[wasm_bindgen(js_name = UpdateWriteLock, inspectable)]
+pub struct WasmUpdateWriteLock(pub(crate) UpdateWriteLock);
+
+#[wasm_bindgen(js_class = UpdateWriteLock)]
+impl WasmUpdateWriteLock {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmEmpty> {
+    apply_with_events(self.0, wasm_effects, wasm_events, client).await
+  }
+}
+
+/// Transaction wrapper for creating a role.
+#[wasm_bindgen(js_name = CreateRole, inspectable)]
+pub struct WasmCreateRole(pub(crate) CreateRole);
+
+#[wasm_bindgen(js_class = CreateRole)]
+impl WasmCreateRole {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmRoleCreated> {
+    let event: RoleCreated = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(event.into())
+  }
+}
+
+/// Transaction wrapper for updating a role.
+#[wasm_bindgen(js_name = UpdateRole, inspectable)]
+pub struct WasmUpdateRole(pub(crate) UpdateRole);
+
+#[wasm_bindgen(js_class = UpdateRole)]
+impl WasmUpdateRole {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmRoleUpdated> {
+    let event: RoleUpdated = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(event.into())
+  }
+}
+
+/// Transaction wrapper for deleting a role.
+#[wasm_bindgen(js_name = DeleteRole, inspectable)]
+pub struct WasmDeleteRole(pub(crate) DeleteRole);
+
+#[wasm_bindgen(js_class = DeleteRole)]
+impl WasmDeleteRole {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmRoleDeleted> {
+    let event: RoleDeleted = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(event.into())
+  }
+}
+
+/// Transaction wrapper for issuing a capability.
+#[wasm_bindgen(js_name = IssueCapability, inspectable)]
+pub struct WasmIssueCapability(pub(crate) IssueCapability);
+
+#[wasm_bindgen(js_class = IssueCapability)]
+impl WasmIssueCapability {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmCapabilityIssued> {
+    let event: CapabilityIssued = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(event.into())
+  }
+}
+
+/// Transaction wrapper for revoking a capability.
+#[wasm_bindgen(js_name = RevokeCapability, inspectable)]
+pub struct WasmRevokeCapability(pub(crate) RevokeCapability);
+
+#[wasm_bindgen(js_class = RevokeCapability)]
+impl WasmRevokeCapability {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmCapabilityRevoked> {
+    let event: CapabilityRevoked = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(event.into())
+  }
+}
+
+/// Transaction wrapper for destroying a capability.
+#[wasm_bindgen(js_name = DestroyCapability, inspectable)]
+pub struct WasmDestroyCapability(pub(crate) DestroyCapability);
+
+#[wasm_bindgen(js_class = DestroyCapability)]
+impl WasmDestroyCapability {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmCapabilityDestroyed> {
+    let event: CapabilityDestroyed = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(event.into())
+  }
+}
+
+/// Transaction wrapper for destroying an initial-admin capability.
+#[wasm_bindgen(js_name = DestroyInitialAdminCapability, inspectable)]
+pub struct WasmDestroyInitialAdminCapability(pub(crate) DestroyInitialAdminCapability);
+
+#[wasm_bindgen(js_class = DestroyInitialAdminCapability)]
+impl WasmDestroyInitialAdminCapability {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmCapabilityDestroyed> {
+    let event: CapabilityDestroyed = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(event.into())
+  }
+}
+
+/// Transaction wrapper for revoking an initial-admin capability.
+#[wasm_bindgen(js_name = RevokeInitialAdminCapability, inspectable)]
+pub struct WasmRevokeInitialAdminCapability(pub(crate) RevokeInitialAdminCapability);
+
+#[wasm_bindgen(js_class = RevokeInitialAdminCapability)]
+impl WasmRevokeInitialAdminCapability {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmCapabilityRevoked> {
+    let event: CapabilityRevoked = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(event.into())
+  }
+}
+
+/// Transaction wrapper for cleaning up expired revoked-capability entries.
+#[wasm_bindgen(js_name = CleanupRevokedCapabilities, inspectable)]
+pub struct WasmCleanupRevokedCapabilities(pub(crate) CleanupRevokedCapabilities);
+
+#[wasm_bindgen(js_class = CleanupRevokedCapabilities)]
+impl WasmCleanupRevokedCapabilities {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmEmpty> { // NOTE(review): pass-through return reconstructed as WasmEmpty — confirm
+    apply_with_events(self.0, wasm_effects, wasm_events, client).await
+  }
+}
+
+/// Transaction wrapper for adding a record.
+#[wasm_bindgen(js_name = AddRecord, inspectable)]
+pub struct WasmAddRecord(pub(crate) AddRecord);
+
+#[wasm_bindgen(js_class = AddRecord)]
+impl WasmAddRecord {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmRecordAdded> {
+    let added: RecordAdded = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(added.into())
+  }
+}
+
+/// Transaction wrapper for deleting a single record.
+#[wasm_bindgen(js_name = DeleteRecord, inspectable)]
+pub struct WasmDeleteRecord(pub(crate) DeleteRecord);
+
+#[wasm_bindgen(js_class = DeleteRecord)]
+impl WasmDeleteRecord {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmRecordDeleted> {
+    let deleted: RecordDeleted = apply_with_events(self.0, wasm_effects, wasm_events, client).await?;
+    Ok(deleted.into())
+  }
+}
+
+/// Transaction wrapper for deleting records in batch form.
+#[wasm_bindgen(js_name = DeleteRecordsBatch, inspectable)]
+pub struct WasmDeleteRecordsBatch(pub(crate) DeleteRecordsBatch);
+
+#[wasm_bindgen(js_class = DeleteRecordsBatch)]
+impl WasmDeleteRecordsBatch {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmEmpty> { // NOTE(review): pass-through return reconstructed as WasmEmpty — confirm
+    apply_with_events(self.0, wasm_effects, wasm_events, client).await
+  }
+}
+
+/// Transaction wrapper for adding a record tag to the trail registry.
+#[wasm_bindgen(js_name = AddRecordTag, inspectable)]
+pub struct WasmAddRecordTag(pub(crate) AddRecordTag);
+
+#[wasm_bindgen(js_class = AddRecordTag)]
+impl WasmAddRecordTag {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmEmpty> { // NOTE(review): pass-through return reconstructed as WasmEmpty — confirm
+    apply_with_events(self.0, wasm_effects, wasm_events, client).await
+  }
+}
+
+/// Transaction wrapper for removing a record tag from the trail registry.
+#[wasm_bindgen(js_name = RemoveRecordTag, inspectable)]
+pub struct WasmRemoveRecordTag(pub(crate) RemoveRecordTag);
+
+#[wasm_bindgen(js_class = RemoveRecordTag)]
+impl WasmRemoveRecordTag {
+  #[wasm_bindgen(js_name = buildProgrammableTransaction)]
+  pub async fn build_programmable_transaction(&self, client: &WasmCoreClientReadOnly) -> Result<Vec<u8>> {
+    build_programmable_transaction(&self.0, client).await
+  }
+
+  #[wasm_bindgen(js_name = applyWithEvents)]
+  pub async fn apply_with_events(
+    self,
+    wasm_effects: &WasmIotaTransactionBlockEffects,
+    wasm_events: &WasmIotaTransactionBlockEvents,
+    client: &WasmCoreClientReadOnly,
+  ) -> Result<WasmEmpty> {
+    apply_with_events(self.0, wasm_effects, wasm_events, client).await
+  }
+}
diff --git a/bindings/wasm/audit_trail_wasm/src/trail_handle/access.rs b/bindings/wasm/audit_trail_wasm/src/trail_handle/access.rs
new file mode 100644
index 00000000..76e1ce2b
--- /dev/null
+++ b/bindings/wasm/audit_trail_wasm/src/trail_handle/access.rs
@@ -0,0 +1,216 @@
+// Copyright 2026 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::anyhow;
+use audit_trail::AuditTrailClient;
+use iota_interaction::types::base_types::ObjectID;
+use 
iota_interaction_ts::bindings::WasmTransactionSigner;
+use iota_interaction_ts::wasm_error::{wasm_error, Result};
+use product_common::bindings::transaction::WasmTransactionBuilder;
+use product_common::bindings::utils::{into_transaction_builder, parse_wasm_object_id};
+use product_common::bindings::WasmObjectID;
+use wasm_bindgen::prelude::*;
+
+use crate::trail::{
+  WasmCleanupRevokedCapabilities, WasmCreateRole, WasmDeleteRole, WasmDestroyCapability,
+  WasmDestroyInitialAdminCapability, WasmIssueCapability, WasmRevokeCapability, WasmRevokeInitialAdminCapability,
+  WasmUpdateRole,
+};
+use crate::types::{WasmCapabilityIssueOptions, WasmPermissionSet, WasmRoleTags};
+
+/// Access-control API scoped to a specific trail.
+#[derive(Clone)]
+#[wasm_bindgen(js_name = TrailAccess, inspectable)]
+pub struct WasmTrailAccess {
+  // NOTE(review): generic args reconstructed after extraction stripped them — confirm against crate API
+  pub(crate) full: Option<AuditTrailClient<WasmTransactionSigner>>,
+  pub(crate) trail_id: ObjectID,
+}
+
+impl WasmTrailAccess {
+  /// Returns the writable client for access-control operations.
+  ///
+  /// Throws when this wrapper was created from `AuditTrailClientReadOnly`.
+  fn require_write(&self) -> Result<&AuditTrailClient<WasmTransactionSigner>> {
+    self.full.as_ref().ok_or_else(|| {
+      wasm_error(anyhow!(
+        "TrailAccess was created from a read-only client; this operation requires AuditTrailClient"
+      ))
+    })
+  }
+}
+
+#[wasm_bindgen(js_class = TrailAccess)]
+impl WasmTrailAccess {
+  /// Returns a role-scoped handle for the given role name.
+  #[wasm_bindgen(js_name = forRole)]
+  pub fn for_role(&self, name: String) -> WasmRoleHandle {
+    WasmRoleHandle {
+      full: self.full.clone(),
+      trail_id: self.trail_id,
+      name,
+    }
+  }
+
+  /// Builds a capability-revocation transaction.
+  #[wasm_bindgen(js_name = revokeCapability, unchecked_return_type = "TransactionBuilder<RevokeCapability>")]
+  pub fn revoke_capability(
+    &self,
+    capability_id: WasmObjectID,
+    capability_valid_until: Option<u64>,
+  ) -> Result<WasmTransactionBuilder> {
+    let capability_id = parse_wasm_object_id(&capability_id)?;
+    let tx = self
+      .require_write()?
+      .trail(self.trail_id)
+      .access()
+      .revoke_capability(capability_id, capability_valid_until)
+      .into_inner();
+    Ok(into_transaction_builder(WasmRevokeCapability(tx)))
+  }
+
+  /// Builds a capability-destruction transaction.
+  #[wasm_bindgen(js_name = destroyCapability, unchecked_return_type = "TransactionBuilder<DestroyCapability>")]
+  pub fn destroy_capability(&self, capability_id: WasmObjectID) -> Result<WasmTransactionBuilder> {
+    let capability_id = parse_wasm_object_id(&capability_id)?;
+    let tx = self
+      .require_write()?
+      .trail(self.trail_id)
+      .access()
+      .destroy_capability(capability_id)
+      .into_inner();
+    Ok(into_transaction_builder(WasmDestroyCapability(tx)))
+  }
+
+  /// Builds an initial-admin-capability destruction transaction.
+  #[wasm_bindgen(js_name = destroyInitialAdminCapability, unchecked_return_type = "TransactionBuilder<DestroyInitialAdminCapability>")]
+  pub fn destroy_initial_admin_capability(&self, capability_id: WasmObjectID) -> Result<WasmTransactionBuilder> {
+    let capability_id = parse_wasm_object_id(&capability_id)?;
+    let tx = self
+      .require_write()?
+      .trail(self.trail_id)
+      .access()
+      .destroy_initial_admin_capability(capability_id)
+      .into_inner();
+    Ok(into_transaction_builder(WasmDestroyInitialAdminCapability(tx)))
+  }
+
+  /// Builds an initial-admin-capability revocation transaction.
+  #[wasm_bindgen(js_name = revokeInitialAdminCapability, unchecked_return_type = "TransactionBuilder<RevokeInitialAdminCapability>")]
+  pub fn revoke_initial_admin_capability(
+    &self,
+    capability_id: WasmObjectID,
+    capability_valid_until: Option<u64>,
+  ) -> Result<WasmTransactionBuilder> {
+    let capability_id = parse_wasm_object_id(&capability_id)?;
+    let tx = self
+      .require_write()?
+      .trail(self.trail_id)
+      .access()
+      .revoke_initial_admin_capability(capability_id, capability_valid_until)
+      .into_inner();
+    Ok(into_transaction_builder(WasmRevokeInitialAdminCapability(tx)))
+  }
+
+  /// Builds a cleanup transaction for expired revoked-capability entries.
+  #[wasm_bindgen(js_name = cleanupRevokedCapabilities, unchecked_return_type = "TransactionBuilder<CleanupRevokedCapabilities>")]
+  pub fn cleanup_revoked_capabilities(&self) -> Result<WasmTransactionBuilder> {
+    let tx = self
+      .require_write()?
+      .trail(self.trail_id)
+      .access()
+      .cleanup_revoked_capabilities()
+      .into_inner();
+    Ok(into_transaction_builder(WasmCleanupRevokedCapabilities(tx)))
+  }
+}
+
+/// Role-scoped access-control API.
+#[derive(Clone)]
+#[wasm_bindgen(js_name = RoleHandle, inspectable)]
+pub struct WasmRoleHandle {
+  // NOTE(review): generic args reconstructed after extraction stripped them — confirm against crate API
+  pub(crate) full: Option<AuditTrailClient<WasmTransactionSigner>>,
+  pub(crate) trail_id: ObjectID,
+  pub(crate) name: String,
+}
+
+impl WasmRoleHandle {
+  /// Returns the writable client for role mutations.
+  ///
+  /// Throws when this wrapper was created from `AuditTrailClientReadOnly`.
+  fn require_write(&self) -> Result<&AuditTrailClient<WasmTransactionSigner>> {
+    self.full.as_ref().ok_or_else(|| {
+      wasm_error(anyhow!(
+        "RoleHandle was created from a read-only client; this operation requires AuditTrailClient"
+      ))
+    })
+  }
+}
+
+#[wasm_bindgen(js_class = RoleHandle)]
+impl WasmRoleHandle {
+  /// Returns the role name represented by this handle.
+  #[wasm_bindgen(getter)]
+  pub fn name(&self) -> String {
+    self.name.clone()
+  }
+
+  /// Builds a role-creation transaction.
+  #[wasm_bindgen(unchecked_return_type = "TransactionBuilder<CreateRole>")]
+  pub fn create(
+    &self,
+    permissions: WasmPermissionSet,
+    role_tags: Option<WasmRoleTags>,
+  ) -> Result<WasmTransactionBuilder> {
+    let tx = self
+      .require_write()?
+      .trail(self.trail_id)
+      .access()
+      .for_role(self.name.clone())
+      .create(permissions.into(), role_tags.map(Into::into))
+      .into_inner();
+    Ok(into_transaction_builder(WasmCreateRole(tx)))
+  }
+
+  /// Builds a capability-issuance transaction for this role.
+  #[wasm_bindgen(js_name = issueCapability, unchecked_return_type = "TransactionBuilder<IssueCapability>")]
+  pub fn issue_capability(&self, options: WasmCapabilityIssueOptions) -> Result<WasmTransactionBuilder> {
+    let tx = self
+      .require_write()?
+      .trail(self.trail_id)
+      .access()
+      .for_role(self.name.clone())
+      .issue_capability(options.into())
+      .into_inner();
+    Ok(into_transaction_builder(WasmIssueCapability(tx)))
+  }
+
+  /// Builds a role-update transaction for this role.
+  #[wasm_bindgen(js_name = updatePermissions, unchecked_return_type = "TransactionBuilder<UpdateRole>")]
+  pub fn update_permissions(
+    &self,
+    permissions: WasmPermissionSet,
+    role_tags: Option<WasmRoleTags>,
+  ) -> Result<WasmTransactionBuilder> {
+    let tx = self
+      .require_write()?
+      .trail(self.trail_id)
+      .access()
+      .for_role(self.name.clone())
+      .update_permissions(permissions.into(), role_tags.map(Into::into))
+      .into_inner();
+    Ok(into_transaction_builder(WasmUpdateRole(tx)))
+  }
+
+  /// Builds a role-deletion transaction for this role.
+  #[wasm_bindgen(unchecked_return_type = "TransactionBuilder<DeleteRole>")]
+  pub fn delete(&self) -> Result<WasmTransactionBuilder> {
+    let tx = self
+      .require_write()?
+      .trail(self.trail_id)
+      .access()
+      .for_role(self.name.clone())
+      .delete()
+      .into_inner();
+    Ok(into_transaction_builder(WasmDeleteRole(tx)))
+  }
+}
diff --git a/bindings/wasm/audit_trail_wasm/src/trail_handle/locking.rs b/bindings/wasm/audit_trail_wasm/src/trail_handle/locking.rs
new file mode 100644
index 00000000..6c56d997
--- /dev/null
+++ b/bindings/wasm/audit_trail_wasm/src/trail_handle/locking.rs
@@ -0,0 +1,100 @@
+// Copyright 2026 IOTA Stiftung
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::anyhow;
+use audit_trail::{AuditTrailClient, AuditTrailClientReadOnly};
+use iota_interaction::types::base_types::ObjectID;
+use iota_interaction_ts::bindings::WasmTransactionSigner;
+use iota_interaction_ts::wasm_error::{wasm_error, Result, WasmResult};
+use product_common::bindings::transaction::WasmTransactionBuilder;
+use product_common::bindings::utils::into_transaction_builder;
+use wasm_bindgen::prelude::*;
+
+use crate::trail::{
+  WasmUpdateDeleteRecordWindow, WasmUpdateDeleteTrailLock, WasmUpdateLockingConfig, WasmUpdateWriteLock,
+};
+use crate::types::{WasmLockingConfig, 
WasmLockingWindow, WasmTimeLock}; + +/// Locking API scoped to a specific trail. +#[derive(Clone)] +#[wasm_bindgen(js_name = TrailLocking, inspectable)] +pub struct WasmTrailLocking { + pub(crate) read_only: AuditTrailClientReadOnly, + pub(crate) full: Option>, + pub(crate) trail_id: ObjectID, +} + +impl WasmTrailLocking { + /// Returns the writable client for locking updates. + /// + /// Throws when this wrapper was created from `AuditTrailClientReadOnly`. + fn require_write(&self) -> Result<&AuditTrailClient> { + self.full.as_ref().ok_or_else(|| { + wasm_error(anyhow!( + "TrailLocking was created from a read-only client; this operation requires AuditTrailClient" + )) + }) + } +} + +#[wasm_bindgen(js_class = TrailLocking)] +impl WasmTrailLocking { + /// Builds a transaction that replaces the full locking configuration. + #[wasm_bindgen(unchecked_return_type = "TransactionBuilder")] + pub fn update(&self, config: WasmLockingConfig) -> Result { + let tx = self + .require_write()? + .trail(self.trail_id) + .locking() + .update(config.into()) + .into_inner(); + Ok(into_transaction_builder(WasmUpdateLockingConfig(tx))) + } + + /// Builds a transaction that updates only the delete-record window. + #[wasm_bindgen(js_name = updateDeleteRecordWindow, unchecked_return_type = "TransactionBuilder")] + pub fn update_delete_record_window(&self, window: WasmLockingWindow) -> Result { + let tx = self + .require_write()? + .trail(self.trail_id) + .locking() + .update_delete_record_window(window.into()) + .into_inner(); + Ok(into_transaction_builder(WasmUpdateDeleteRecordWindow(tx))) + } + + /// Builds a transaction that updates only the delete-trail lock. + #[wasm_bindgen(js_name = updateDeleteTrailLock, unchecked_return_type = "TransactionBuilder")] + pub fn update_delete_trail_lock(&self, lock: WasmTimeLock) -> Result { + let tx = self + .require_write()? 
+ .trail(self.trail_id) + .locking() + .update_delete_trail_lock(lock.into()) + .into_inner(); + Ok(into_transaction_builder(WasmUpdateDeleteTrailLock(tx))) + } + + /// Builds a transaction that updates only the write lock. + #[wasm_bindgen(js_name = updateWriteLock, unchecked_return_type = "TransactionBuilder")] + pub fn update_write_lock(&self, lock: WasmTimeLock) -> Result { + let tx = self + .require_write()? + .trail(self.trail_id) + .locking() + .update_write_lock(lock.into()) + .into_inner(); + Ok(into_transaction_builder(WasmUpdateWriteLock(tx))) + } + + /// Returns whether a record is currently locked against deletion. + #[wasm_bindgen(js_name = isRecordLocked)] + pub async fn is_record_locked(&self, sequence_number: u64) -> Result { + self.read_only + .trail(self.trail_id) + .locking() + .is_record_locked(sequence_number) + .await + .wasm_result() + } +} diff --git a/bindings/wasm/audit_trail_wasm/src/trail_handle/mod.rs b/bindings/wasm/audit_trail_wasm/src/trail_handle/mod.rs new file mode 100644 index 00000000..a18a91fd --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/src/trail_handle/mod.rs @@ -0,0 +1,140 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! Trail-scoped wasm handle wrappers. 
+ +mod access; +mod locking; +mod records; +mod tags; + +pub(crate) use access::WasmTrailAccess; +use anyhow::anyhow; +use audit_trail::{AuditTrailClient, AuditTrailClientReadOnly}; +use iota_interaction::types::base_types::ObjectID; +use iota_interaction_ts::bindings::WasmTransactionSigner; +use iota_interaction_ts::wasm_error::{wasm_error, Result, WasmResult}; +pub(crate) use locking::WasmTrailLocking; +use product_common::bindings::transaction::WasmTransactionBuilder; +use product_common::bindings::utils::into_transaction_builder; +pub(crate) use records::WasmTrailRecords; +pub(crate) use tags::WasmTrailTags; +use wasm_bindgen::prelude::*; + +use crate::trail::{WasmDeleteAuditTrail, WasmMigrate, WasmOnChainAuditTrail, WasmUpdateMetadata}; + +/// Handle bound to a specific audit-trail object. +/// +/// `AuditTrailHandle` keeps one trail ID together with the originating client so all trail-scoped +/// reads and transaction builders can be discovered from a single JS/TS value. +#[derive(Clone)] +#[wasm_bindgen(js_name = AuditTrailHandle, inspectable)] +pub struct WasmAuditTrailHandle { + pub(crate) read_only: AuditTrailClientReadOnly, + pub(crate) full: Option>, + pub(crate) trail_id: ObjectID, +} + +impl WasmAuditTrailHandle { + pub(crate) fn from_read_only(read_only: AuditTrailClientReadOnly, trail_id: ObjectID) -> Self { + Self { + read_only, + full: None, + trail_id, + } + } + + pub(crate) fn from_full(full: AuditTrailClient, trail_id: ObjectID) -> Self { + Self { + read_only: full.read_only().clone(), + full: Some(full), + trail_id, + } + } + + /// Returns the writable client when this handle came from `AuditTrailClient`. + /// + /// Throws when the handle was created from `AuditTrailClientReadOnly`. 
+ fn require_write(&self) -> Result<&AuditTrailClient> { + self.full.as_ref().ok_or_else(|| { + wasm_error(anyhow!( + "AuditTrailHandle was created from a read-only client; this operation requires AuditTrailClient" + )) + }) + } +} + +#[wasm_bindgen(js_class = AuditTrailHandle)] +impl WasmAuditTrailHandle { + /// Loads the full on-chain trail object. + /// + /// Each call fetches a fresh snapshot from chain state. + pub async fn get(&self) -> Result { + let trail = self.read_only.trail(self.trail_id).get().await.wasm_result()?; + Ok(trail.into()) + } + + /// Builds a migration transaction for this trail. + #[wasm_bindgen(unchecked_return_type = "TransactionBuilder")] + pub fn migrate(&self) -> Result { + let tx = self.require_write()?.trail(self.trail_id).migrate().into_inner(); + Ok(into_transaction_builder(WasmMigrate(tx))) + } + + /// Builds a delete transaction for this trail. + #[wasm_bindgen(js_name = deleteAuditTrail, unchecked_return_type = "TransactionBuilder")] + pub fn delete_audit_trail(&self) -> Result { + let tx = self + .require_write()? + .trail(self.trail_id) + .delete_audit_trail() + .into_inner(); + Ok(into_transaction_builder(WasmDeleteAuditTrail(tx))) + } + + /// Builds a mutable-metadata update transaction for this trail. + #[wasm_bindgen(js_name = updateMetadata, unchecked_return_type = "TransactionBuilder")] + pub fn update_metadata(&self, metadata: Option) -> Result { + let tx = self + .require_write()? + .trail(self.trail_id) + .update_metadata(metadata) + .into_inner(); + Ok(into_transaction_builder(WasmUpdateMetadata(tx))) + } + + /// Returns the record API scoped to this trail. + pub fn records(&self) -> WasmTrailRecords { + WasmTrailRecords { + read_only: self.read_only.clone(), + full: self.full.clone(), + trail_id: self.trail_id, + } + } + + /// Returns the access-control API scoped to this trail. 
+ pub fn access(&self) -> WasmTrailAccess { + WasmTrailAccess { + full: self.full.clone(), + trail_id: self.trail_id, + } + } + + /// Returns the locking API scoped to this trail. + pub fn locking(&self) -> WasmTrailLocking { + WasmTrailLocking { + read_only: self.read_only.clone(), + full: self.full.clone(), + trail_id: self.trail_id, + } + } + + /// Returns the tag-registry API scoped to this trail. + pub fn tags(&self) -> WasmTrailTags { + WasmTrailTags { + read_only: self.read_only.clone(), + full: self.full.clone(), + trail_id: self.trail_id, + } + } +} diff --git a/bindings/wasm/audit_trail_wasm/src/trail_handle/records.rs b/bindings/wasm/audit_trail_wasm/src/trail_handle/records.rs new file mode 100644 index 00000000..d4b646b2 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/src/trail_handle/records.rs @@ -0,0 +1,154 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::anyhow; +use audit_trail::core::types::Data as AuditTrailData; +use audit_trail::{AuditTrailClient, AuditTrailClientReadOnly}; +use iota_interaction::types::base_types::ObjectID; +use iota_interaction_ts::bindings::WasmTransactionSigner; +use iota_interaction_ts::wasm_error::{wasm_error, Result, WasmResult}; +use product_common::bindings::transaction::WasmTransactionBuilder; +use product_common::bindings::utils::into_transaction_builder; +use wasm_bindgen::prelude::*; + +use crate::trail::{WasmAddRecord, WasmDeleteRecord, WasmDeleteRecordsBatch}; +use crate::types::{WasmData, WasmEmpty, WasmPaginatedRecord, WasmRecord}; + +/// Record API scoped to a specific trail. +#[derive(Clone)] +#[wasm_bindgen(js_name = TrailRecords, inspectable)] +pub struct WasmTrailRecords { + pub(crate) read_only: AuditTrailClientReadOnly, + pub(crate) full: Option>, + pub(crate) trail_id: ObjectID, +} + +impl WasmTrailRecords { + /// Returns the writable client for record mutations. + /// + /// Throws when this wrapper was created from `AuditTrailClientReadOnly`. 
+ fn require_write(&self) -> Result<&AuditTrailClient> { + self.full.as_ref().ok_or_else(|| { + wasm_error(anyhow!( + "TrailRecords was created from a read-only client; this operation requires AuditTrailClient" + )) + }) + } +} + +#[wasm_bindgen(js_class = TrailRecords)] +impl WasmTrailRecords { + /// Loads one record by sequence number. + pub async fn get(&self, sequence_number: u64) -> Result { + let record = self + .read_only + .trail(self.trail_id) + .records() + .get(sequence_number) + .await + .wasm_result()?; + Ok(record.into()) + } + + /// Returns the number of records currently stored in the trail. + #[wasm_bindgen(js_name = recordCount)] + pub async fn record_count(&self) -> Result { + self.read_only + .trail(self.trail_id) + .records() + .record_count() + .await + .wasm_result() + } + + /// Lists all records in sequence-number order. + pub async fn list(&self) -> Result> { + let mut records: Vec<_> = self + .read_only + .trail(self.trail_id) + .records() + .list() + .await + .wasm_result()? + .into_iter() + .collect(); + records.sort_unstable_by_key(|(sequence_number, _)| *sequence_number); + Ok(records.into_iter().map(|(_, record)| record.into()).collect()) + } + + /// Lists all records while enforcing a maximum number of entries. + #[wasm_bindgen(js_name = listWithLimit)] + pub async fn list_with_limit(&self, max_entries: usize) -> Result> { + let mut records: Vec<_> = self + .read_only + .trail(self.trail_id) + .records() + .list_with_limit(max_entries) + .await + .wasm_result()? + .into_iter() + .collect(); + records.sort_unstable_by_key(|(sequence_number, _)| *sequence_number); + Ok(records.into_iter().map(|(_, record)| record.into()).collect()) + } + + /// Loads one page of records starting at `cursor`. 
+ #[wasm_bindgen(js_name = listPage)] + pub async fn list_page(&self, cursor: Option, limit: usize) -> Result { + let page = self + .read_only + .trail(self.trail_id) + .records() + .list_page(cursor, limit) + .await + .wasm_result()?; + Ok(page.into()) + } + + /// Executes the correction helper for a record payload. + pub async fn correct(&self, replaces: Vec, data: WasmData, metadata: Option) -> Result { + self.require_write()? + .trail(self.trail_id) + .records() + .correct(replaces, data.into(), metadata) + .await + .wasm_result()?; + Ok(WasmEmpty) + } + + /// Builds a record-add transaction. + #[wasm_bindgen(unchecked_return_type = "TransactionBuilder")] + pub fn add(&self, data: WasmData, metadata: Option, tag: Option) -> Result { + let tx = self + .require_write()? + .trail(self.trail_id) + .records() + .add(AuditTrailData::from(data), metadata, tag) + .into_inner(); + Ok(into_transaction_builder(WasmAddRecord(tx))) + } + + /// Builds a single-record delete transaction. + #[wasm_bindgen(unchecked_return_type = "TransactionBuilder")] + pub fn delete(&self, sequence_number: u64) -> Result { + let tx = self + .require_write()? + .trail(self.trail_id) + .records() + .delete(sequence_number) + .into_inner(); + Ok(into_transaction_builder(WasmDeleteRecord(tx))) + } + + /// Builds a batched record-delete transaction. + #[wasm_bindgen(js_name = deleteBatch, unchecked_return_type = "TransactionBuilder")] + pub fn delete_batch(&self, limit: u64) -> Result { + let tx = self + .require_write()? 
+ .trail(self.trail_id) + .records() + .delete_records_batch(limit) + .into_inner(); + Ok(into_transaction_builder(WasmDeleteRecordsBatch(tx))) + } +} diff --git a/bindings/wasm/audit_trail_wasm/src/trail_handle/tags.rs b/bindings/wasm/audit_trail_wasm/src/trail_handle/tags.rs new file mode 100644 index 00000000..2b545938 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/src/trail_handle/tags.rs @@ -0,0 +1,67 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::anyhow; +use audit_trail::{AuditTrailClient, AuditTrailClientReadOnly}; +use iota_interaction::types::base_types::ObjectID; +use iota_interaction_ts::bindings::WasmTransactionSigner; +use iota_interaction_ts::wasm_error::{wasm_error, Result, WasmResult}; +use product_common::bindings::transaction::WasmTransactionBuilder; +use product_common::bindings::utils::into_transaction_builder; +use wasm_bindgen::prelude::*; + +use crate::trail::{WasmAddRecordTag, WasmRemoveRecordTag}; +use crate::types::WasmRecordTagEntry; + +/// Tag-registry API scoped to a specific trail. +#[derive(Clone)] +#[wasm_bindgen(js_name = TrailTags, inspectable)] +pub struct WasmTrailTags { + pub(crate) read_only: AuditTrailClientReadOnly, + pub(crate) full: Option>, + pub(crate) trail_id: ObjectID, +} + +impl WasmTrailTags { + /// Returns the writable client for tag mutations. 
+ fn require_write(&self) -> Result<&AuditTrailClient> { + self.full.as_ref().ok_or_else(|| { + wasm_error(anyhow!( + "TrailTags was created from a read-only client; this operation requires AuditTrailClient" + )) + }) + } +} + +#[wasm_bindgen(js_class = TrailTags)] +impl WasmTrailTags { + pub async fn list(&self) -> Result> { + let trail = self.read_only.trail(self.trail_id).get().await.wasm_result()?; + let mut tags: Vec = trail + .tags + .iter() + .map(|(tag, usage_count)| (tag.clone(), *usage_count).into()) + .collect(); + tags.sort_unstable_by(|left, right| left.tag.cmp(&right.tag)); + Ok(tags) + } + + /// Builds a transaction that adds a tag to the trail registry. + #[wasm_bindgen(unchecked_return_type = "TransactionBuilder")] + pub fn add(&self, tag: String) -> Result { + let tx = self.require_write()?.trail(self.trail_id).tags().add(tag).into_inner(); + Ok(into_transaction_builder(WasmAddRecordTag(tx))) + } + + /// Builds a transaction that removes a tag from the trail registry. + #[wasm_bindgen(unchecked_return_type = "TransactionBuilder")] + pub fn remove(&self, tag: String) -> Result { + let tx = self + .require_write()? 
+ .trail(self.trail_id) + .tags() + .remove(tag) + .into_inner(); + Ok(into_transaction_builder(WasmRemoveRecordTag(tx))) + } +} diff --git a/bindings/wasm/audit_trail_wasm/src/types.rs b/bindings/wasm/audit_trail_wasm/src/types.rs new file mode 100644 index 00000000..52cc9586 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/src/types.rs @@ -0,0 +1,1097 @@ +// Copyright 2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::{HashMap, HashSet}; + +use audit_trail::core::types::{ + AuditTrailCreated, AuditTrailDeleted, Capability, CapabilityAdminPermissions, CapabilityDestroyed, + CapabilityIssueOptions, CapabilityIssued, CapabilityRevoked, Data, ImmutableMetadata, LockingConfig, LockingWindow, + PaginatedRecord, Permission, PermissionSet, Record, RecordAdded, RecordCorrection, RecordDeleted, Role, + RoleAdminPermissions, RoleCreated, RoleDeleted, RoleMap, RoleTags, RoleUpdated, TimeLock, +}; +use iota_interaction::types::base_types::ObjectID; +use iota_interaction::types::collection_types::LinkedTable; +use js_sys::Uint8Array; +use product_common::bindings::WasmIotaAddress; +use serde::{Deserialize, Serialize}; +use wasm_bindgen::prelude::*; + +/// Placeholder wrapper used for transaction outputs that carry no value. +#[wasm_bindgen(js_name = Empty, inspectable)] +pub struct WasmEmpty; + +impl From<()> for WasmEmpty { + fn from(_: ()) -> Self { + Self + } +} + +/// JS-friendly wrapper for audit-trail record payloads. +#[wasm_bindgen(js_name = Data, inspectable)] +#[derive(Clone)] +pub struct WasmData(pub(crate) Data); + +#[wasm_bindgen(js_class = Data)] +impl WasmData { + /// Returns the underlying payload as either a string or `Uint8Array`. + #[wasm_bindgen(getter)] + pub fn value(&self) -> JsValue { + match &self.0 { + Data::Bytes(bytes) => Uint8Array::from(bytes.as_slice()).into(), + Data::Text(text) => JsValue::from(text), + } + } + + /// Returns the payload converted to a string. 
+ #[wasm_bindgen(js_name = toString)] + pub fn to_string(&self) -> String { + match &self.0 { + Data::Bytes(bytes) => String::from_utf8_lossy(bytes).to_string(), + Data::Text(text) => text.clone(), + } + } + + /// Returns the payload converted to raw bytes. + #[wasm_bindgen(js_name = toBytes)] + pub fn to_bytes(&self) -> Vec { + match &self.0 { + Data::Bytes(bytes) => bytes.clone(), + Data::Text(text) => text.as_bytes().to_vec(), + } + } + + /// Creates a text payload. + #[wasm_bindgen(js_name = fromString)] + pub fn from_string(data: String) -> Self { + Self(Data::text(data)) + } + + /// Creates a binary payload. + #[wasm_bindgen(js_name = fromBytes)] + pub fn from_bytes(data: Uint8Array) -> Self { + Self(Data::bytes(data.to_vec())) + } +} + +impl From for WasmData { + fn from(value: Data) -> Self { + Self(value) + } +} + +impl From for Data { + fn from(value: WasmData) -> Self { + value.0 + } +} + +fn permission_sort_key(permission: Permission) -> u8 { + match permission { + Permission::DeleteAuditTrail => 0, + Permission::DeleteAllRecords => 1, + Permission::AddRecord => 2, + Permission::DeleteRecord => 3, + Permission::CorrectRecord => 4, + Permission::UpdateLockingConfig => 5, + Permission::UpdateLockingConfigForDeleteRecord => 6, + Permission::UpdateLockingConfigForDeleteTrail => 7, + Permission::UpdateLockingConfigForWrite => 8, + Permission::AddRoles => 9, + Permission::UpdateRoles => 10, + Permission::DeleteRoles => 11, + Permission::AddCapabilities => 12, + Permission::RevokeCapabilities => 13, + Permission::UpdateMetadata => 14, + Permission::DeleteMetadata => 15, + Permission::Migrate => 16, + Permission::AddRecordTags => 17, + Permission::DeleteRecordTags => 18, + } +} + +fn sorted_permissions_from_set(permissions: HashSet) -> Vec { + let mut permissions: Vec<_> = permissions.into_iter().collect(); + permissions.sort_unstable_by_key(|permission| permission_sort_key(*permission)); + permissions.into_iter().map(Into::into).collect() +} + +fn 
sorted_tag_names(tags: HashSet) -> Vec { + let mut tags: Vec<_> = tags.into_iter().collect(); + tags.sort_unstable(); + tags +} + +fn sorted_object_ids(ids: HashSet) -> Vec { + let mut ids: Vec<_> = ids.into_iter().map(|id| id.to_string()).collect(); + ids.sort_unstable(); + ids +} + +fn optional_object_id(id: Option) -> Option { + id.map(|id| id.to_string()) +} + +fn sorted_role_entries(roles: HashMap) -> Vec { + let mut roles: Vec<_> = roles + .into_iter() + .map(|(name, role)| WasmRolePermissionsEntry { + name, + permissions: sorted_permissions_from_set(role.permissions), + role_tags: role.data.map(Into::into), + }) + .collect(); + roles.sort_unstable_by(|left, right| left.name.cmp(&right.name)); + roles +} + +/// Permission variants exposed to wasm consumers. +#[wasm_bindgen(js_name = Permission)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub enum WasmPermission { + DeleteAuditTrail, + DeleteAllRecords, + AddRecord, + DeleteRecord, + CorrectRecord, + UpdateLockingConfig, + UpdateLockingConfigForDeleteRecord, + UpdateLockingConfigForDeleteTrail, + UpdateLockingConfigForWrite, + AddRoles, + UpdateRoles, + DeleteRoles, + AddCapabilities, + RevokeCapabilities, + UpdateMetadata, + DeleteMetadata, + Migrate, + AddRecordTags, + DeleteRecordTags, +} + +impl From for WasmPermission { + fn from(value: Permission) -> Self { + match value { + Permission::DeleteAuditTrail => Self::DeleteAuditTrail, + Permission::DeleteAllRecords => Self::DeleteAllRecords, + Permission::AddRecord => Self::AddRecord, + Permission::DeleteRecord => Self::DeleteRecord, + Permission::CorrectRecord => Self::CorrectRecord, + Permission::UpdateLockingConfig => Self::UpdateLockingConfig, + Permission::UpdateLockingConfigForDeleteRecord => Self::UpdateLockingConfigForDeleteRecord, + Permission::UpdateLockingConfigForDeleteTrail => Self::UpdateLockingConfigForDeleteTrail, + Permission::UpdateLockingConfigForWrite => Self::UpdateLockingConfigForWrite, + Permission::AddRoles 
=> Self::AddRoles, + Permission::UpdateRoles => Self::UpdateRoles, + Permission::DeleteRoles => Self::DeleteRoles, + Permission::AddCapabilities => Self::AddCapabilities, + Permission::RevokeCapabilities => Self::RevokeCapabilities, + Permission::UpdateMetadata => Self::UpdateMetadata, + Permission::DeleteMetadata => Self::DeleteMetadata, + Permission::Migrate => Self::Migrate, + Permission::AddRecordTags => Self::AddRecordTags, + Permission::DeleteRecordTags => Self::DeleteRecordTags, + } + } +} + +impl From for Permission { + fn from(value: WasmPermission) -> Self { + match value { + WasmPermission::DeleteAuditTrail => Self::DeleteAuditTrail, + WasmPermission::DeleteAllRecords => Self::DeleteAllRecords, + WasmPermission::AddRecord => Self::AddRecord, + WasmPermission::DeleteRecord => Self::DeleteRecord, + WasmPermission::CorrectRecord => Self::CorrectRecord, + WasmPermission::UpdateLockingConfig => Self::UpdateLockingConfig, + WasmPermission::UpdateLockingConfigForDeleteRecord => Self::UpdateLockingConfigForDeleteRecord, + WasmPermission::UpdateLockingConfigForDeleteTrail => Self::UpdateLockingConfigForDeleteTrail, + WasmPermission::UpdateLockingConfigForWrite => Self::UpdateLockingConfigForWrite, + WasmPermission::AddRoles => Self::AddRoles, + WasmPermission::UpdateRoles => Self::UpdateRoles, + WasmPermission::DeleteRoles => Self::DeleteRoles, + WasmPermission::AddCapabilities => Self::AddCapabilities, + WasmPermission::RevokeCapabilities => Self::RevokeCapabilities, + WasmPermission::UpdateMetadata => Self::UpdateMetadata, + WasmPermission::DeleteMetadata => Self::DeleteMetadata, + WasmPermission::Migrate => Self::Migrate, + WasmPermission::AddRecordTags => Self::AddRecordTags, + WasmPermission::DeleteRecordTags => Self::DeleteRecordTags, + } + } +} + +/// JS-friendly wrapper for a set of permissions. +#[wasm_bindgen(js_name = PermissionSet, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmPermissionSet { + /// Permissions granted by this set. 
+ pub permissions: Vec, +} + +#[wasm_bindgen(js_class = PermissionSet)] +impl WasmPermissionSet { + /// Creates a permission set from an explicit list of permissions. + #[wasm_bindgen(constructor)] + pub fn new(permissions: Vec) -> Self { + Self { permissions } + } + + /// Returns the recommended role-administration permission set. + #[wasm_bindgen(js_name = adminPermissions)] + pub fn admin_permissions() -> Self { + PermissionSet::admin_permissions().into() + } + + /// Returns the permissions needed to administer records. + #[wasm_bindgen(js_name = recordAdminPermissions)] + pub fn record_admin_permissions() -> Self { + PermissionSet::record_admin_permissions().into() + } + + /// Returns the permissions needed to administer locking rules. + #[wasm_bindgen(js_name = lockingAdminPermissions)] + pub fn locking_admin_permissions() -> Self { + PermissionSet::locking_admin_permissions().into() + } + + /// Returns the permissions needed to administer roles. + #[wasm_bindgen(js_name = roleAdminPermissions)] + pub fn role_admin_permissions() -> Self { + PermissionSet::role_admin_permissions().into() + } + + /// Returns the permissions needed to issue and revoke capabilities. + #[wasm_bindgen(js_name = capAdminPermissions)] + pub fn cap_admin_permissions() -> Self { + PermissionSet::cap_admin_permissions().into() + } + + /// Returns the permissions needed to administer mutable metadata. + #[wasm_bindgen(js_name = metadataAdminPermissions)] + pub fn metadata_admin_permissions() -> Self { + PermissionSet::metadata_admin_permissions().into() + } + + /// Returns the permissions needed to administer record tags. 
+ #[wasm_bindgen(js_name = tagAdminPermissions)] + pub fn tag_admin_permissions() -> Self { + PermissionSet::tag_admin_permissions().into() + } +} + +impl From for WasmPermissionSet { + fn from(value: PermissionSet) -> Self { + Self { + permissions: sorted_permissions_from_set(value.permissions), + } + } +} + +impl From for PermissionSet { + fn from(value: WasmPermissionSet) -> Self { + Self { + permissions: value.permissions.into_iter().map(Into::into).collect(), + } + } +} + +/// Linked-table metadata for record storage. +#[wasm_bindgen(js_name = LinkedTable, getter_with_clone, inspectable)] +#[derive(Clone, Serialize, Deserialize)] +pub struct WasmLinkedTable { + /// Linked-table object ID. + pub id: String, + /// Declared number of entries in the table. + pub size: u64, + /// Sequence number of the first entry, if any. + pub head: Option, + /// Sequence number of the last entry, if any. + pub tail: Option, +} + +impl From> for WasmLinkedTable { + fn from(value: LinkedTable) -> Self { + Self { + id: value.id.to_string(), + size: value.size, + head: value.head, + tail: value.tail, + } + } +} + +/// Permission requirements for role administration. +#[wasm_bindgen(js_name = RoleAdminPermissions, getter_with_clone, inspectable)] +#[derive(Clone, Serialize, Deserialize)] +pub struct WasmRoleAdminPermissions { + pub add: WasmPermission, + pub delete: WasmPermission, + pub update: WasmPermission, +} + +impl From for WasmRoleAdminPermissions { + fn from(value: RoleAdminPermissions) -> Self { + Self { + add: value.add.into(), + delete: value.delete.into(), + update: value.update.into(), + } + } +} + +/// Permission requirements for capability administration. 
+#[wasm_bindgen(js_name = CapabilityAdminPermissions, getter_with_clone, inspectable)] +#[derive(Clone, Serialize, Deserialize)] +pub struct WasmCapabilityAdminPermissions { + pub add: WasmPermission, + pub revoke: WasmPermission, +} + +impl From for WasmCapabilityAdminPermissions { + fn from(value: CapabilityAdminPermissions) -> Self { + Self { + add: value.add.into(), + revoke: value.revoke.into(), + } + } +} + +/// Flattened role entry exposed inside [`WasmRoleMap`]. +#[wasm_bindgen(js_name = RolePermissionsEntry, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmRolePermissionsEntry { + pub name: String, + pub permissions: Vec, + #[wasm_bindgen(js_name = roleTags)] + pub role_tags: Option, +} + +/// Allowlisted record tags stored on a role. +#[wasm_bindgen(js_name = RoleTags, getter_with_clone, inspectable)] +#[derive(Clone, Serialize, Deserialize)] +pub struct WasmRoleTags { + /// Sorted tag names allowed by the role. + pub tags: Vec, +} + +#[wasm_bindgen(js_class = RoleTags)] +impl WasmRoleTags { + /// Creates role-tag restrictions from a list of tag names. + #[wasm_bindgen(constructor)] + pub fn new(tags: Vec) -> Self { + let mut tags = tags; + tags.sort_unstable(); + tags.dedup(); + Self { tags } + } +} + +impl From for WasmRoleTags { + fn from(value: RoleTags) -> Self { + Self { + tags: sorted_tag_names(value.tags), + } + } +} + +impl From for RoleTags { + fn from(value: WasmRoleTags) -> Self { + RoleTags::new(value.tags) + } +} + +/// Trail-owned record tag plus its usage count. +#[wasm_bindgen(js_name = RecordTagEntry, getter_with_clone, inspectable)] +#[derive(Clone, Serialize, Deserialize)] +pub struct WasmRecordTagEntry { + pub tag: String, + #[wasm_bindgen(js_name = usageCount)] + pub usage_count: u64, +} + +impl From<(String, u64)> for WasmRecordTagEntry { + fn from((tag, usage_count): (String, u64)) -> Self { + Self { tag, usage_count } + } +} + +/// JS-friendly view of the trail role map. 
+#[wasm_bindgen(js_name = RoleMap, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmRoleMap { + #[wasm_bindgen(js_name = targetKey)] + pub target_key: String, + pub roles: Vec, + #[wasm_bindgen(js_name = initialAdminRoleName)] + pub initial_admin_role_name: String, + #[wasm_bindgen(js_name = revokedCapabilities)] + pub revoked_capabilities: WasmObjectIdLinkedTable, + #[wasm_bindgen(js_name = initialAdminCapIds)] + pub initial_admin_cap_ids: Vec, + #[wasm_bindgen(js_name = roleAdminPermissions)] + pub role_admin_permissions: WasmRoleAdminPermissions, + #[wasm_bindgen(js_name = capabilityAdminPermissions)] + pub capability_admin_permissions: WasmCapabilityAdminPermissions, +} + +impl From for WasmRoleMap { + fn from(value: RoleMap) -> Self { + Self { + target_key: value.target_key.to_string(), + roles: sorted_role_entries(value.roles), + initial_admin_role_name: value.initial_admin_role_name, + revoked_capabilities: value.revoked_capabilities.into(), + initial_admin_cap_ids: sorted_object_ids(value.initial_admin_cap_ids), + role_admin_permissions: value.role_admin_permissions.into(), + capability_admin_permissions: value.capability_admin_permissions.into(), + } + } +} + +/// Linked-table metadata keyed by object IDs. +#[wasm_bindgen(js_name = ObjectIdLinkedTable, getter_with_clone, inspectable)] +#[derive(Clone, Serialize, Deserialize)] +pub struct WasmObjectIdLinkedTable { + pub id: String, + pub size: u64, + pub head: Option, + pub tail: Option, +} + +impl From> for WasmObjectIdLinkedTable { + fn from(value: LinkedTable) -> Self { + Self { + id: value.id.to_string(), + size: value.size, + head: optional_object_id(value.head), + tail: optional_object_id(value.tail), + } + } +} + +/// Capability issuance options exposed to wasm consumers. 
+#[wasm_bindgen(js_name = CapabilityIssueOptions, getter_with_clone, inspectable)] +#[derive(Clone, Default, Serialize, Deserialize)] +pub struct WasmCapabilityIssueOptions { + #[wasm_bindgen(js_name = issuedTo)] + pub issued_to: Option, + #[wasm_bindgen(js_name = validFromMs)] + pub valid_from_ms: Option, + #[wasm_bindgen(js_name = validUntilMs)] + pub valid_until_ms: Option, +} + +#[wasm_bindgen(js_class = CapabilityIssueOptions)] +impl WasmCapabilityIssueOptions { + /// Creates capability issuance options. + #[wasm_bindgen(constructor)] + pub fn new(issued_to: Option, valid_from_ms: Option, valid_until_ms: Option) -> Self { + Self { + issued_to, + valid_from_ms, + valid_until_ms, + } + } +} + +impl From for WasmCapabilityIssueOptions { + fn from(value: CapabilityIssueOptions) -> Self { + Self { + issued_to: value.issued_to.map(|address| address.to_string()), + valid_from_ms: value.valid_from_ms, + valid_until_ms: value.valid_until_ms, + } + } +} + +impl From for CapabilityIssueOptions { + fn from(value: WasmCapabilityIssueOptions) -> Self { + Self { + issued_to: value.issued_to.and_then(|address| address.parse().ok()), + valid_from_ms: value.valid_from_ms, + valid_until_ms: value.valid_until_ms, + } + } +} + +/// Capability data returned to wasm consumers. 
+#[wasm_bindgen(js_name = Capability, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmCapability { + pub id: String, + #[wasm_bindgen(js_name = targetKey)] + pub target_key: String, + pub role: String, + #[wasm_bindgen(js_name = issuedTo)] + pub issued_to: Option, + #[wasm_bindgen(js_name = validFrom)] + pub valid_from: Option, + #[wasm_bindgen(js_name = validUntil)] + pub valid_until: Option, +} + +impl From for WasmCapability { + fn from(value: Capability) -> Self { + Self { + id: value.id.id.to_string(), + target_key: value.target_key.to_string(), + role: value.role, + issued_to: value.issued_to.map(|address| address.to_string()), + valid_from: value.valid_from, + valid_until: value.valid_until, + } + } +} + +/// Event payload emitted when a trail is created. +#[wasm_bindgen(js_name = AuditTrailCreated, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmAuditTrailCreated { + #[wasm_bindgen(js_name = trailId)] + pub trail_id: String, + pub creator: WasmIotaAddress, + pub timestamp: u64, +} + +impl From for WasmAuditTrailCreated { + fn from(value: AuditTrailCreated) -> Self { + Self { + trail_id: value.trail_id.to_string(), + creator: value.creator.to_string(), + timestamp: value.timestamp, + } + } +} + +/// Event payload emitted when a trail is deleted. +#[wasm_bindgen(js_name = AuditTrailDeleted, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmAuditTrailDeleted { + #[wasm_bindgen(js_name = trailId)] + pub trail_id: String, + pub timestamp: u64, +} + +impl From for WasmAuditTrailDeleted { + fn from(value: AuditTrailDeleted) -> Self { + Self { + trail_id: value.trail_id.to_string(), + timestamp: value.timestamp, + } + } +} + +/// Event payload emitted when a record is added. 
+#[wasm_bindgen(js_name = RecordAdded, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmRecordAdded { + #[wasm_bindgen(js_name = trailId)] + pub trail_id: String, + #[wasm_bindgen(js_name = sequenceNumber)] + pub sequence_number: u64, + #[wasm_bindgen(js_name = addedBy)] + pub added_by: WasmIotaAddress, + pub timestamp: u64, +} + +impl From for WasmRecordAdded { + fn from(value: RecordAdded) -> Self { + Self { + trail_id: value.trail_id.to_string(), + sequence_number: value.sequence_number, + added_by: value.added_by.to_string(), + timestamp: value.timestamp, + } + } +} + +/// Event payload emitted when a record is deleted. +#[wasm_bindgen(js_name = RecordDeleted, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmRecordDeleted { + #[wasm_bindgen(js_name = trailId)] + pub trail_id: String, + #[wasm_bindgen(js_name = sequenceNumber)] + pub sequence_number: u64, + #[wasm_bindgen(js_name = deletedBy)] + pub deleted_by: WasmIotaAddress, + pub timestamp: u64, +} + +impl From for WasmRecordDeleted { + fn from(value: RecordDeleted) -> Self { + Self { + trail_id: value.trail_id.to_string(), + sequence_number: value.sequence_number, + deleted_by: value.deleted_by.to_string(), + timestamp: value.timestamp, + } + } +} + +/// Event payload emitted when a capability is issued. 
+#[wasm_bindgen(js_name = CapabilityIssued, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmCapabilityIssued { + #[wasm_bindgen(js_name = targetKey)] + pub target_key: String, + #[wasm_bindgen(js_name = capabilityId)] + pub capability_id: String, + pub role: String, + #[wasm_bindgen(js_name = issuedTo)] + pub issued_to: Option, + #[wasm_bindgen(js_name = validFrom)] + pub valid_from: Option, + #[wasm_bindgen(js_name = validUntil)] + pub valid_until: Option, +} + +impl From for WasmCapabilityIssued { + fn from(value: CapabilityIssued) -> Self { + Self { + target_key: value.target_key.to_string(), + capability_id: value.capability_id.to_string(), + role: value.role, + issued_to: value.issued_to.map(|address| address.to_string()), + valid_from: value.valid_from, + valid_until: value.valid_until, + } + } +} + +/// Event payload emitted when a capability is destroyed. +#[wasm_bindgen(js_name = CapabilityDestroyed, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmCapabilityDestroyed { + #[wasm_bindgen(js_name = targetKey)] + pub target_key: String, + #[wasm_bindgen(js_name = capabilityId)] + pub capability_id: String, + pub role: String, + #[wasm_bindgen(js_name = issuedTo)] + pub issued_to: Option, + #[wasm_bindgen(js_name = validFrom)] + pub valid_from: Option, + #[wasm_bindgen(js_name = validUntil)] + pub valid_until: Option, +} + +impl From for WasmCapabilityDestroyed { + fn from(value: CapabilityDestroyed) -> Self { + Self { + target_key: value.target_key.to_string(), + capability_id: value.capability_id.to_string(), + role: value.role, + issued_to: value.issued_to.map(|address| address.to_string()), + valid_from: value.valid_from, + valid_until: value.valid_until, + } + } +} + +/// Event payload emitted when a capability is revoked. 
+#[wasm_bindgen(js_name = CapabilityRevoked, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmCapabilityRevoked { + #[wasm_bindgen(js_name = targetKey)] + pub target_key: String, + #[wasm_bindgen(js_name = capabilityId)] + pub capability_id: String, + #[wasm_bindgen(js_name = validUntil)] + pub valid_until: u64, +} + +impl From for WasmCapabilityRevoked { + fn from(value: CapabilityRevoked) -> Self { + Self { + target_key: value.target_key.to_string(), + capability_id: value.capability_id.to_string(), + valid_until: value.valid_until, + } + } +} + +/// Event payload emitted when a role is created. +#[wasm_bindgen(js_name = RoleCreated, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmRoleCreated { + #[wasm_bindgen(js_name = trailId)] + pub trail_id: String, + pub role: String, + pub permissions: WasmPermissionSet, + #[wasm_bindgen(js_name = roleTags)] + pub role_tags: Option, + #[wasm_bindgen(js_name = createdBy)] + pub created_by: WasmIotaAddress, + pub timestamp: u64, +} + +impl From for WasmRoleCreated { + fn from(value: RoleCreated) -> Self { + Self { + trail_id: value.trail_id.to_string(), + role: value.role, + permissions: value.permissions.into(), + role_tags: value.data.map(Into::into), + created_by: value.created_by.to_string(), + timestamp: value.timestamp, + } + } +} + +/// Event payload emitted when a role is updated. 
+#[wasm_bindgen(js_name = RoleUpdated, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmRoleUpdated { + #[wasm_bindgen(js_name = trailId)] + pub trail_id: String, + pub role: String, + pub permissions: WasmPermissionSet, + #[wasm_bindgen(js_name = roleTags)] + pub role_tags: Option, + #[wasm_bindgen(js_name = updatedBy)] + pub updated_by: WasmIotaAddress, + pub timestamp: u64, +} + +impl From for WasmRoleUpdated { + fn from(value: RoleUpdated) -> Self { + Self { + trail_id: value.trail_id.to_string(), + role: value.role, + permissions: value.permissions.into(), + role_tags: value.data.map(Into::into), + updated_by: value.updated_by.to_string(), + timestamp: value.timestamp, + } + } +} + +/// Event payload emitted when a role is deleted. +#[wasm_bindgen(js_name = RoleDeleted, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmRoleDeleted { + #[wasm_bindgen(js_name = trailId)] + pub trail_id: String, + pub role: String, + #[wasm_bindgen(js_name = deletedBy)] + pub deleted_by: WasmIotaAddress, + pub timestamp: u64, +} + +impl From for WasmRoleDeleted { + fn from(value: RoleDeleted) -> Self { + Self { + trail_id: value.trail_id.to_string(), + role: value.role, + deleted_by: value.deleted_by.to_string(), + timestamp: value.timestamp, + } + } +} + +/// Discriminant for the shape stored inside [`WasmTimeLock`]. +#[wasm_bindgen(js_name = TimeLockType)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum WasmTimeLockType { + None, + UnlockAt, + UnlockAtMs, + UntilDestroyed, + Infinite, +} + +/// JS-friendly wrapper for time locks. +#[wasm_bindgen(js_name = TimeLock, inspectable)] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WasmTimeLock(pub(crate) TimeLock); + +#[wasm_bindgen(js_class = TimeLock)] +impl WasmTimeLock { + /// Creates a lock that unlocks at a Unix timestamp in seconds. 
+ #[wasm_bindgen(js_name = withUnlockAt)] + pub fn with_unlock_at(time_sec: u32) -> Self { + Self(TimeLock::UnlockAt(time_sec)) + } + + /// Creates a lock that unlocks at a Unix timestamp in milliseconds. + #[wasm_bindgen(js_name = withUnlockAtMs)] + pub fn with_unlock_at_ms(time_ms: u64) -> Self { + Self(TimeLock::UnlockAtMs(time_ms)) + } + + /// Creates a lock that stays active until the protected object is destroyed. + #[wasm_bindgen(js_name = withUntilDestroyed)] + pub fn with_until_destroyed() -> Self { + Self(TimeLock::UntilDestroyed) + } + + /// Creates a lock that never unlocks. + #[wasm_bindgen(js_name = withInfinite)] + pub fn with_infinite() -> Self { + Self(TimeLock::Infinite) + } + + /// Creates a disabled lock. + #[wasm_bindgen(js_name = withNone)] + pub fn with_none() -> Self { + Self(TimeLock::None) + } + + /// Returns the lock variant. + #[wasm_bindgen(js_name = "type", getter)] + pub fn lock_type(&self) -> WasmTimeLockType { + match self.0 { + TimeLock::None => WasmTimeLockType::None, + TimeLock::UnlockAt(_) => WasmTimeLockType::UnlockAt, + TimeLock::UnlockAtMs(_) => WasmTimeLockType::UnlockAtMs, + TimeLock::UntilDestroyed => WasmTimeLockType::UntilDestroyed, + TimeLock::Infinite => WasmTimeLockType::Infinite, + } + } + + /// Returns the lock argument for parameterized variants. + #[wasm_bindgen(js_name = "args", getter)] + pub fn args(&self) -> JsValue { + match self.0 { + TimeLock::UnlockAt(value) => JsValue::from(value), + TimeLock::UnlockAtMs(value) => JsValue::from(value), + _ => JsValue::UNDEFINED, + } + } +} + +impl From for WasmTimeLock { + fn from(value: TimeLock) -> Self { + Self(value) + } +} + +impl From for TimeLock { + fn from(value: WasmTimeLock) -> Self { + value.0 + } +} + +/// Discriminant for the shape stored inside [`WasmLockingWindow`]. 
+#[wasm_bindgen(js_name = LockingWindowType)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum WasmLockingWindowType { + None, + TimeBased, + CountBased, +} + +/// JS-friendly wrapper for delete windows. +#[wasm_bindgen(js_name = LockingWindow, inspectable)] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WasmLockingWindow(pub(crate) LockingWindow); + +#[wasm_bindgen(js_class = LockingWindow)] +impl WasmLockingWindow { + /// Creates a disabled delete window. + #[wasm_bindgen(js_name = withNone)] + pub fn with_none() -> Self { + Self(LockingWindow::None) + } + + /// Creates a time-based delete window. + #[wasm_bindgen(js_name = withTimeBased)] + pub fn with_time_based(seconds: u64) -> Self { + Self(LockingWindow::TimeBased { seconds }) + } + + /// Creates a count-based delete window. + #[wasm_bindgen(js_name = withCountBased)] + pub fn with_count_based(count: u64) -> Self { + Self(LockingWindow::CountBased { count }) + } + + /// Returns the window variant. + #[wasm_bindgen(js_name = "type", getter)] + pub fn window_type(&self) -> WasmLockingWindowType { + match self.0 { + LockingWindow::None => WasmLockingWindowType::None, + LockingWindow::TimeBased { .. } => WasmLockingWindowType::TimeBased, + LockingWindow::CountBased { .. } => WasmLockingWindowType::CountBased, + } + } + + /// Returns the window argument for parameterized variants. + #[wasm_bindgen(js_name = "args", getter)] + pub fn args(&self) -> JsValue { + match self.0 { + LockingWindow::TimeBased { seconds } => JsValue::from(seconds), + LockingWindow::CountBased { count } => JsValue::from(count), + LockingWindow::None => JsValue::UNDEFINED, + } + } +} + +impl From for WasmLockingWindow { + fn from(value: LockingWindow) -> Self { + Self(value) + } +} + +impl From for LockingWindow { + fn from(value: WasmLockingWindow) -> Self { + value.0 + } +} + +/// Full locking configuration exposed to wasm consumers. 
+#[wasm_bindgen(js_name = LockingConfig, getter_with_clone, inspectable)] +#[derive(Clone, Serialize, Deserialize)] +pub struct WasmLockingConfig { + #[wasm_bindgen(js_name = deleteRecordWindow)] + pub delete_record_window: WasmLockingWindow, + #[wasm_bindgen(js_name = deleteTrailLock)] + pub delete_trail_lock: WasmTimeLock, + #[wasm_bindgen(js_name = writeLock)] + pub write_lock: WasmTimeLock, +} + +#[wasm_bindgen(js_class = LockingConfig)] +impl WasmLockingConfig { + /// Creates a locking configuration. + #[wasm_bindgen(constructor)] + pub fn new( + delete_record_window: WasmLockingWindow, + delete_trail_lock: WasmTimeLock, + write_lock: WasmTimeLock, + ) -> Self { + Self { + delete_record_window, + delete_trail_lock, + write_lock, + } + } +} + +impl From for WasmLockingConfig { + fn from(value: LockingConfig) -> Self { + Self { + delete_record_window: value.delete_record_window.into(), + delete_trail_lock: value.delete_trail_lock.into(), + write_lock: value.write_lock.into(), + } + } +} + +impl From for LockingConfig { + fn from(value: WasmLockingConfig) -> Self { + Self { + delete_record_window: value.delete_record_window.into(), + delete_trail_lock: value.delete_trail_lock.into(), + write_lock: value.write_lock.into(), + } + } +} + +/// Immutable trail metadata exposed to wasm consumers. +#[wasm_bindgen(js_name = ImmutableMetadata, getter_with_clone, inspectable)] +#[derive(Clone, Serialize, Deserialize)] +pub struct WasmImmutableMetadata { + pub name: String, + pub description: Option, +} + +impl From for WasmImmutableMetadata { + fn from(value: ImmutableMetadata) -> Self { + Self { + name: value.name, + description: value.description, + } + } +} + +impl From for ImmutableMetadata { + fn from(value: WasmImmutableMetadata) -> Self { + ImmutableMetadata { + name: value.name, + description: value.description, + } + } +} + +/// Correction metadata attached to a record. 
+#[wasm_bindgen(js_name = RecordCorrection, getter_with_clone, inspectable)] +#[derive(Clone, Serialize, Deserialize)] +pub struct WasmRecordCorrection { + pub replaces: Vec, + #[wasm_bindgen(js_name = isReplacedBy)] + pub is_replaced_by: Option, +} + +impl From for WasmRecordCorrection { + fn from(value: RecordCorrection) -> Self { + let mut replaces: Vec = value.replaces.into_iter().collect(); + replaces.sort_unstable(); + Self { + replaces, + is_replaced_by: value.is_replaced_by, + } + } +} + +impl From for RecordCorrection { + fn from(value: WasmRecordCorrection) -> Self { + Self { + replaces: value.replaces.into_iter().collect::>(), + is_replaced_by: value.is_replaced_by, + } + } +} + +/// Single audit-trail record exposed to wasm consumers. +#[wasm_bindgen(js_name = Record, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmRecord { + pub data: WasmData, + pub metadata: Option, + pub tag: Option, + #[wasm_bindgen(js_name = sequenceNumber)] + pub sequence_number: u64, + #[wasm_bindgen(js_name = addedBy)] + pub added_by: WasmIotaAddress, + #[wasm_bindgen(js_name = addedAt)] + pub added_at: u64, + pub correction: WasmRecordCorrection, +} + +impl From> for WasmRecord { + fn from(value: Record) -> Self { + Self { + data: value.data.into(), + metadata: value.metadata, + tag: value.tag, + sequence_number: value.sequence_number, + added_by: value.added_by.to_string(), + added_at: value.added_at, + correction: value.correction.into(), + } + } +} + +/// One page of records returned by `TrailRecords.listPage(...)`. 
+#[wasm_bindgen(js_name = PaginatedRecord, getter_with_clone, inspectable)] +#[derive(Clone)] +pub struct WasmPaginatedRecord { + pub records: Vec, + #[wasm_bindgen(js_name = nextCursor)] + pub next_cursor: Option, + #[wasm_bindgen(js_name = hasNextPage)] + pub has_next_page: bool, +} + +impl From> for WasmPaginatedRecord { + fn from(value: PaginatedRecord) -> Self { + Self { + records: value.records.into_values().map(Into::into).collect(), + next_cursor: value.next_cursor, + has_next_page: value.has_next_page, + } + } +} diff --git a/bindings/wasm/audit_trail_wasm/tsconfig.json b/bindings/wasm/audit_trail_wasm/tsconfig.json new file mode 100644 index 00000000..6b1fd874 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/tsconfig.json @@ -0,0 +1,11 @@ +{ + "extends": "../tsconfig.json", + "compilerOptions": { + "baseUrl": ".", + "paths": { + "@iota/audit-trail/*": [ + "./*" + ] + } + } +} diff --git a/bindings/wasm/audit_trail_wasm/tsconfig.node.json b/bindings/wasm/audit_trail_wasm/tsconfig.node.json new file mode 100644 index 00000000..c09b2e5a --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/tsconfig.node.json @@ -0,0 +1,7 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "esModuleInterop": true, + "module": "commonjs" + } +} diff --git a/bindings/wasm/audit_trail_wasm/tsconfig.typedoc.json b/bindings/wasm/audit_trail_wasm/tsconfig.typedoc.json new file mode 100644 index 00000000..bfc43be9 --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/tsconfig.typedoc.json @@ -0,0 +1,6 @@ +{ + "extends": "./tsconfig.node.json", + "include": [ + "node/**/*" + ] +} diff --git a/bindings/wasm/audit_trail_wasm/typedoc.json b/bindings/wasm/audit_trail_wasm/typedoc.json new file mode 100644 index 00000000..c28f411b --- /dev/null +++ b/bindings/wasm/audit_trail_wasm/typedoc.json @@ -0,0 +1,11 @@ +{ + "name": "@iota/audit-trail API documentation", + "extends": [ + "../typedoc.json" + ], + "entryPoints": [ + "./node/" + ], + "tsconfig": "./tsconfig.typedoc.json", + 
"out": "./docs/wasm" +} diff --git a/bindings/wasm/notarization_wasm/Cargo.toml b/bindings/wasm/notarization_wasm/Cargo.toml index 237cb2cd..24f164c0 100644 --- a/bindings/wasm/notarization_wasm/Cargo.toml +++ b/bindings/wasm/notarization_wasm/Cargo.toml @@ -21,8 +21,8 @@ async-trait = { version = "0.1", default-features = false } bcs = "0.1.6" console_error_panic_hook = { version = "0.1" } fastcrypto = { git = "https://github.com/MystenLabs/fastcrypto", rev = "69d496c71fb37e3d22fe85e5bbfd4256d61422b9", package = "fastcrypto" } -iota_interaction = { git = "https://github.com/iotaledger/product-core.git", tag = "v0.8.15", package = "iota_interaction", default-features = false } -iota_interaction_ts = { git = "https://github.com/iotaledger/product-core.git", tag = "v0.8.15", package = "iota_interaction_ts" } +iota_interaction = { git = "https://github.com/iotaledger/product-core.git", branch = "feat/tf-compoenents-dev", package = "iota_interaction", default-features = false } +iota_interaction_ts = { git = "https://github.com/iotaledger/product-core.git", branch = "feat/tf-compoenents-dev", package = "iota_interaction_ts" } js-sys = { version = "=0.3.85" } prefix-hex = { version = "0.7", default-features = false } serde = { version = "1.0", features = ["derive"] } @@ -37,9 +37,16 @@ wasm-bindgen-futures = { version = "0.4", default-features = false } [dependencies.product_common] git = "https://github.com/iotaledger/product-core.git" -tag = "v0.8.15" +branch = "feat/tf-compoenents-dev" package = "product_common" -features = ["core-client", "transaction", "bindings", "binding-utils", "gas-station", "default-http-client"] +features = [ + "core-client", + "transaction", + "bindings", + "binding-utils", + "gas-station", + "default-http-client", +] [dependencies.notarization] path = "../../../notarization-rs" diff --git a/bindings/wasm/notarization_wasm/src/wasm_notarization_client.rs b/bindings/wasm/notarization_wasm/src/wasm_notarization_client.rs index 
20efd4a2..7578b6e6 100644 --- a/bindings/wasm/notarization_wasm/src/wasm_notarization_client.rs +++ b/bindings/wasm/notarization_wasm/src/wasm_notarization_client.rs @@ -94,6 +94,15 @@ impl WasmNotarizationClient { .collect() } + /// Retrieves the [`TfComponents`] package ID for the current network, if available. + /// + /// # Returns + /// The package ID as a string, or `undefined` when no package applies. + #[wasm_bindgen(js_name = tfComponentsPackageId)] + pub fn tf_components_package_id(&self) -> Option { + None + } + /// Retrieves the IOTA client instance. /// /// # Returns diff --git a/bindings/wasm/notarization_wasm/src/wasm_notarization_client_read_only.rs b/bindings/wasm/notarization_wasm/src/wasm_notarization_client_read_only.rs index dd6f838a..9ddd7b25 100644 --- a/bindings/wasm/notarization_wasm/src/wasm_notarization_client_read_only.rs +++ b/bindings/wasm/notarization_wasm/src/wasm_notarization_client_read_only.rs @@ -86,6 +86,15 @@ impl WasmNotarizationClientReadOnly { .collect() } + /// Retrieves the [`TfComponents`] package ID for the current network, if available. + /// + /// # Returns + /// The package ID as a string, or `undefined` when no package applies. + #[wasm_bindgen(js_name = tfComponentsPackageId)] + pub fn tf_components_package_id(&self) -> Option { + None + } + /// Retrieves the underlying IOTA client used by this client. /// /// # Returns diff --git a/bindings/wasm/notarization_wasm/src/wasm_time_lock.rs b/bindings/wasm/notarization_wasm/src/wasm_time_lock.rs index 9455bd90..cf3e42fd 100644 --- a/bindings/wasm/notarization_wasm/src/wasm_time_lock.rs +++ b/bindings/wasm/notarization_wasm/src/wasm_time_lock.rs @@ -9,14 +9,18 @@ use wasm_bindgen::prelude::*; /// /// This enum defines the possible types of time locks that can be applied to a notarization object. /// - `None`: No time lock is applied. -/// - `UnlockAt`: The object will unlock at a specific timestamp. 
+/// - `UnlockAt`: The object will unlock at a specific timestamp (seconds since Unix epoch). +/// - `UnlockAtMs`: Same as UnlockAt (unlocks at specific timestamp) but using milliseconds since Unix epoch. /// - `UntilDestroyed`: The object remains locked until it is destroyed. Can not be used for `delete_lock`. +/// - `Infinite`: The object is permanently locked and will never unlock. #[wasm_bindgen(js_name = TimeLockType)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum WasmTimeLockType { None = "None", UnlockAt = "UnlockAt", + UnlockAtMs = "UnlockAtMs", UntilDestroyed = "UntilDestroyed", + Infinite = "Infinite", } /// Represents a time lock configuration. @@ -28,16 +32,28 @@ pub struct WasmTimeLock(pub(crate) TimeLock); #[wasm_bindgen(js_class = TimeLock)] impl WasmTimeLock { - /// Creates a time lock that unlocks at a specific timestamp. + /// Creates a time lock that unlocks at a specific seconds based timestamp. /// /// # Arguments - /// * `time` - The timestamp in seconds since the Unix epoch at which the object will unlock. + /// * `time_sec` - The timestamp in seconds since the Unix epoch at which the object will unlock. /// /// # Returns /// A new `TimeLock` instance configured to unlock at the specified timestamp. #[wasm_bindgen(js_name = withUnlockAt)] - pub fn with_unlock_at(time: u32) -> Self { - Self(TimeLock::UnlockAt(time)) + pub fn with_unlock_at(time_sec: u32) -> Self { + Self(TimeLock::UnlockAt(time_sec)) + } + + /// Creates a time lock that unlocks at a specific milliseconds based timestamp. + /// + /// # Arguments + /// * `time_ms` - The timestamp in milliseconds since the Unix epoch at which the object will unlock. + /// + /// # Returns + /// A new `TimeLock` instance configured to unlock at the specified timestamp. + #[wasm_bindgen(js_name = withUnlockAtMs)] + pub fn with_unlock_at_ms(time_ms: u64) -> Self { + Self(TimeLock::UnlockAtMs(time_ms)) } /// Creates a time lock that remains locked until the object is destroyed. 
@@ -49,6 +65,15 @@ impl WasmTimeLock { Self(TimeLock::UntilDestroyed) } + /// Creates a time lock that is locked permanently and will never be unlocked + /// + /// # Returns + /// A new `TimeLock` instance configured to remain locked infinitely. + #[wasm_bindgen(js_name = withInfinite)] + pub fn with_infinite() -> Self { + Self(TimeLock::Infinite) + } + /// Creates a time lock with no restrictions. /// /// # Returns @@ -66,7 +91,9 @@ impl WasmTimeLock { pub fn lock_type(&self) -> WasmTimeLockType { match &self.0 { TimeLock::UnlockAt(_) => WasmTimeLockType::UnlockAt, + TimeLock::UnlockAtMs(_) => WasmTimeLockType::UnlockAtMs, TimeLock::UntilDestroyed => WasmTimeLockType::UntilDestroyed, + TimeLock::Infinite => WasmTimeLockType::Infinite, TimeLock::None => WasmTimeLockType::None, } } @@ -81,6 +108,7 @@ impl WasmTimeLock { pub fn args(&self) -> JsValue { match &self.0 { TimeLock::UnlockAt(u) => JsValue::from(*u), + TimeLock::UnlockAtMs(u) => JsValue::from(*u), _ => JsValue::UNDEFINED, } } diff --git a/dprint.json b/dprint.json index e5444f58..5eee3a23 100644 --- a/dprint.json +++ b/dprint.json @@ -14,7 +14,8 @@ "excludes": [ "**/*-lock.json", "**/{node_modules, target}", - "bindings/wasm/notarization_wasm/{node,web}/**/*.{js,ts}" + "bindings/wasm/notarization_wasm/{node,web}/**/*.{js,ts}", + "bindings/wasm/audit_trail_wasm/{node,web}/**/*.{js,ts}" ], "plugins": [ "https://plugins.dprint.dev/markdown-0.18.0.wasm", diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 1669189c..5c0b68cf 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -10,46 +10,99 @@ path = "utils/utils.rs" [[example]] name = "01_create_locked_notarization" -path = "01_create_locked_notarization.rs" +path = "notarization/01_create_locked_notarization.rs" [[example]] name = "02_create_dynamic_notarization" -path = "02_create_dynamic_notarization.rs" +path = "notarization/02_create_dynamic_notarization.rs" [[example]] name = "03_update_dynamic_notarization" -path = 
"03_update_dynamic_notarization.rs" +path = "notarization/03_update_dynamic_notarization.rs" [[example]] name = "04_destroy_notarization" -path = "04_destroy_notarization.rs" +path = "notarization/04_destroy_notarization.rs" [[example]] name = "05_update_state" -path = "05_update_state.rs" +path = "notarization/05_update_state.rs" [[example]] name = "06_update_metadata" -path = "06_update_metadata.rs" +path = "notarization/06_update_metadata.rs" [[example]] name = "07_transfer_dynamic_notarization" -path = "07_transfer_dynamic_notarization.rs" +path = "notarization/07_transfer_dynamic_notarization.rs" [[example]] name = "08_access_read_only_methods" -path = "08_access_read_only_methods.rs" +path = "notarization/08_access_read_only_methods.rs" [[example]] name = "01_iot_weather_station" -path = "real-world/01_iot_weather_station.rs" +path = "notarization/real-world/01_iot_weather_station.rs" [[example]] name = "02_legal_contract" -path = "real-world/02_legal_contract.rs" +path = "notarization/real-world/02_legal_contract.rs" + +[[example]] +name = "01_create_audit_trail" +path = "audit-trail/01_create_audit_trail.rs" + +[[example]] +name = "02_add_and_read_records" +path = "audit-trail/02_add_and_read_records.rs" + +[[example]] +name = "03_update_metadata" +path = "audit-trail/03_update_metadata.rs" + +[[example]] +name = "04_configure_locking" +path = "audit-trail/04_configure_locking.rs" + +[[example]] +name = "05_manage_access" +path = "audit-trail/05_manage_access.rs" + +[[example]] +name = "06_delete_records" +path = "audit-trail/06_delete_records.rs" + +[[example]] +name = "07_access_read_only_methods" +path = "audit-trail/07_access_read_only_methods.rs" + +[[example]] +name = "08_delete_audit_trail" +path = "audit-trail/08_delete_audit_trail.rs" + +[[example]] +name = "09_tagged_records" +path = "audit-trail/advanced/09_tagged_records.rs" + +[[example]] +name = "10_capability_constraints" +path = "audit-trail/advanced/10_capability_constraints.rs" + 
+[[example]] +name = "11_manage_record_tags" +path = "audit-trail/advanced/11_manage_record_tags.rs" + +[[example]] +name = "01_customs_clearance" +path = "audit-trail/real-world/01_customs_clearance.rs" + +[[example]] +name = "02_clinical_trial" +path = "audit-trail/real-world/02_clinical_trial.rs" [dependencies] anyhow.workspace = true +audit_trail = { path = "../audit-trail-rs" } chrono = { workspace = true } iota-sdk = { workspace = true } notarization = { path = "../notarization-rs" } diff --git a/examples/audit-trail/01_create_audit_trail.rs b/examples/audit-trail/01_create_audit_trail.rs new file mode 100644 index 00000000..b4e7c6d4 --- /dev/null +++ b/examples/audit-trail/01_create_audit_trail.rs @@ -0,0 +1,123 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates the trail and holds the built-in Admin capability that is automatically minted on creation. +//! - **RecordAdmin**: Receives a RecordAdmin capability bound to their address. Writes records in subsequent examples. + +use anyhow::Result; +use audit_trail::core::types::{CapabilityIssueOptions, Data, ImmutableMetadata, InitialRecord, PermissionSet}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Create an audit trail with an initial record and metadata. +/// 2. Inspect the built-in Admin role that is automatically granted to the creator. +/// 3. Use the Admin capability to define a `RecordAdmin` role. +/// 4. Issue a capability for the `RecordAdmin` role to a specific address. +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail: Create Trail & Define Roles ===\n"); + + // `admin` creates the trail and holds the Admin capability that is automatically + // minted on creation. `record_admin` represents the actor who will later write records. 
+ let admin = get_funded_audit_trail_client().await?; + let record_admin = get_funded_audit_trail_client().await?; + + println!("Admin address: {}", admin.sender_address()); + println!("RecordAdmin address: {}\n", record_admin.sender_address()); + + // ------------------------------------------------------------------------- + // Step 1: Create an audit trail + // ------------------------------------------------------------------------- + // The builder supports optional immutable metadata (name + description), + // mutable updatable metadata, an initial record, record tag registry, and + // locking configuration. + // + // On success, the transaction engine automatically mints an Admin capability + // object and transfers it to the sender's address. This capability grants + // full administrative control over the trail (role management, capability + // issuance, tag management, etc.). + let created = admin + .create_trail() + .with_trail_metadata(ImmutableMetadata::new( + "Product Shipment Audit Trail".to_string(), + Some("Immutable audit log for product lifecycle events".to_string()), + )) + .with_updatable_metadata("Status: Active") + .with_initial_record(InitialRecord::new( + Data::text("Shipment #SHP-20260401-001 created at warehouse A"), + Some("event:shipment_created;location:warehouse-a".to_string()), + None, + )) + .finish() + .build_and_execute(&admin) + .await? + .output; + + println!( + "Trail created!\n Trail ID: {}\n Creator: {}\n Timestamp: {} ms\n", + created.trail_id, created.creator, created.timestamp + ); + + // Fetch the on-chain trail object to inspect the automatically created Admin role. 
+ let trail = admin.trail(created.trail_id).get().await?; + let admin_role_name = &trail.roles.initial_admin_role_name; + let admin_permissions = &trail.roles.roles[admin_role_name].permissions; + println!( + "Built-in admin role: \"{admin_role_name}\" ({} permissions)\n", + admin_permissions.len() + ); + + // ------------------------------------------------------------------------- + // Step 2: Define a RecordAdmin role + // ------------------------------------------------------------------------- + // The Admin capability (held by the sender) allows creating new roles. + // PermissionSet::record_admin_permissions() grants AddRecord, DeleteRecord, + // and CorrectRecord permissions. + let record_admin_role = "RecordAdmin"; + let role_created = admin + .trail(created.trail_id) + .access() + .for_role(record_admin_role) + .create(PermissionSet::record_admin_permissions(), None) + .build_and_execute(&admin) + .await? + .output; + + println!( + "Role \"{}\" defined with permissions:\n {:?}\n", + role_created.role, role_created.permissions.permissions + ); + + // ------------------------------------------------------------------------- + // Step 3: Issue a capability for the RecordAdmin role + // ------------------------------------------------------------------------- + // A Capability object is minted on-chain and transferred to `record_admin`'s + // address. Only the holder of that address can use it to write records. + let capability = admin + .trail(created.trail_id) + .access() + .for_role(record_admin_role) + .issue_capability(CapabilityIssueOptions { + issued_to: Some(record_admin.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await? 
+ .output; + + println!( + "Capability issued!\n Capability ID: {}\n Trail ID: {}\n Role: {}\n Issued to: {}", + capability.capability_id, + capability.target_key, + capability.role, + capability + .issued_to + .map_or_else(|| "any holder (no address restriction)".to_string(), |a| a.to_string()) + ); + + Ok(()) +} diff --git a/examples/audit-trail/02_add_and_read_records.rs b/examples/audit-trail/02_add_and_read_records.rs new file mode 100644 index 00000000..903edbc8 --- /dev/null +++ b/examples/audit-trail/02_add_and_read_records.rs @@ -0,0 +1,158 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates the trail, defines the RecordAdmin role, and issues a capability. +//! - **RecordAdmin**: Holds the capability and writes records. Reads are also done through this client to demonstrate +//! that any address can read, but only the cap holder can write. + +use anyhow::{Result, ensure}; +use audit_trail::core::types::{CapabilityIssueOptions, Data, InitialRecord, PermissionSet}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Create an audit trail with an initial record. +/// 2. Define a `RecordAdmin` role and issue a capability for it. +/// 3. Add follow-up records to the trail. +/// 4. Read records back individually and through paginated traversal. +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail: Add & Read Records ===\n"); + + // `admin` creates the trail and manages roles. + // `record_admin` holds the RecordAdmin capability and writes records. 
+ let admin = get_funded_audit_trail_client().await?; + let record_admin = get_funded_audit_trail_client().await?; + + println!("Admin address: {}", admin.sender_address()); + println!("RecordAdmin address: {}\n", record_admin.sender_address()); + + // ------------------------------------------------------------------------- + // Step 1: Create a trail with one initial record + // ------------------------------------------------------------------------- + let created = admin + .create_trail() + .with_initial_record(InitialRecord::new( + Data::text("Trail opened"), + Some("event:trail_created".to_string()), + None, + )) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail_id = created.trail_id; + println!("Trail created: {trail_id}\n"); + + // ------------------------------------------------------------------------- + // Step 2: Create a record-admin role and issue a capability for it + // ------------------------------------------------------------------------- + admin + .trail(trail_id) + .access() + .for_role("RecordAdmin") + .create(PermissionSet::record_admin_permissions(), None) + .build_and_execute(&admin) + .await?; + + let capability = admin + .trail(trail_id) + .access() + .for_role("RecordAdmin") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(record_admin.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await? + .output; + + println!( + "Issued capability {} for role {}\n", + capability.capability_id, capability.role + ); + + // ------------------------------------------------------------------------- + // Step 3: Append follow-up records + // ------------------------------------------------------------------------- + // The client automatically finds the capability in `record_admin`'s wallet. 
+ let records = record_admin.trail(trail_id).records(); + + let first_added = records + .add( + Data::text("Shipment received at warehouse A"), + Some("event:received".to_string()), + None, + ) + .build_and_execute(&record_admin) + .await? + .output; + + let second_added = records + .add( + Data::text("Shipment dispatched to retailer"), + Some("event:dispatched".to_string()), + None, + ) + .build_and_execute(&record_admin) + .await? + .output; + + println!( + "Added records at sequence numbers {} and {}\n", + first_added.sequence_number, second_added.sequence_number + ); + + // ------------------------------------------------------------------------- + // Step 4: Read records back by sequence number + // ------------------------------------------------------------------------- + let initial = records.get(0).await?; + let first = records.get(first_added.sequence_number).await?; + let second = records.get(second_added.sequence_number).await?; + + println!("Initial record: {:?}", initial.data); + println!("First added record: {:?}", first.data); + println!("Second added record: {:?}\n", second.data); + + ensure!(matches!(initial.data, Data::Text(ref text) if text == "Trail opened")); + ensure!(matches!( + first.data, + Data::Text(ref text) if text == "Shipment received at warehouse A" + )); + ensure!(matches!( + second.data, + Data::Text(ref text) if text == "Shipment dispatched to retailer" + )); + + // ------------------------------------------------------------------------- + // Step 5: Inspect record count and page through the linked table + // ------------------------------------------------------------------------- + let count = records.record_count().await?; + println!("Current record count: {count}"); + ensure!(count == 3, "expected 3 records, got {count}"); + + let first_page = records.list_page(None, 2).await?; + println!( + "First page contains {} records; has_next_page = {}", + first_page.records.len(), + first_page.has_next_page + ); + + let second_page = 
records.list_page(first_page.next_cursor, 2).await?; + println!( + "Second page contains {} records; has_next_page = {}", + second_page.records.len(), + second_page.has_next_page + ); + + ensure!(first_page.records.len() == 2, "expected first page size 2"); + ensure!(second_page.records.len() == 1, "expected second page size 1"); + + println!("\nRecord flow completed successfully."); + + Ok(()) +} diff --git a/examples/audit-trail/03_update_metadata.rs b/examples/audit-trail/03_update_metadata.rs new file mode 100644 index 00000000..94eac179 --- /dev/null +++ b/examples/audit-trail/03_update_metadata.rs @@ -0,0 +1,107 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates the trail and sets up the MetadataAdmin role. +//! - **MetadataAdmin**: Holds the MetadataAdmin capability and updates the trail's mutable status field. Has no +//! record-write permissions. + +use anyhow::{Result, ensure}; +use audit_trail::core::types::{CapabilityIssueOptions, Data, ImmutableMetadata, InitialRecord, PermissionSet}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Create a trail with immutable and updatable metadata. +/// 2. Delegate metadata updates through a dedicated `MetadataAdmin` role. +/// 3. Change and clear the trail's updatable metadata. +/// 4. Verify that immutable metadata never changes. +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail: Update Metadata ===\n"); + + // `admin` creates the trail and manages roles. + // `metadata_admin` holds the MetadataAdmin capability and updates the trail status. 
+ let admin = get_funded_audit_trail_client().await?; + let metadata_admin = get_funded_audit_trail_client().await?; + + let immutable_metadata = ImmutableMetadata::new( + "Shipment Processing".to_string(), + Some("Tracks the lifecycle of a warehouse shipment".to_string()), + ); + + let created = admin + .create_trail() + .with_trail_metadata(immutable_metadata.clone()) + .with_updatable_metadata("Status: Draft") + .with_initial_record(InitialRecord::new( + Data::text("Shipment created"), + Some("event:created".to_string()), + None, + )) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail_id = created.trail_id; + + admin + .trail(trail_id) + .access() + .for_role("MetadataAdmin") + .create(PermissionSet::metadata_admin_permissions(), None) + .build_and_execute(&admin) + .await?; + + admin + .trail(trail_id) + .access() + .for_role("MetadataAdmin") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(metadata_admin.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await?; + + let before = admin.trail(trail_id).get().await?; + println!( + "Before update:\n immutable = {:?}\n updatable = {:?}\n", + before.immutable_metadata, before.updatable_metadata + ); + + metadata_admin + .trail(trail_id) + .update_metadata(Some("Status: In Review".to_string())) + .build_and_execute(&metadata_admin) + .await?; + + let after_update = admin.trail(trail_id).get().await?; + println!( + "After update:\n immutable = {:?}\n updatable = {:?}\n", + after_update.immutable_metadata, after_update.updatable_metadata + ); + + ensure!(after_update.immutable_metadata == Some(immutable_metadata.clone())); + ensure!(after_update.updatable_metadata.as_deref() == Some("Status: In Review")); + + metadata_admin + .trail(trail_id) + .update_metadata(None) + .build_and_execute(&metadata_admin) + .await?; + + let after_clear = admin.trail(trail_id).get().await?; + println!( + "After clear:\n immutable = {:?}\n updatable 
= {:?}", + after_clear.immutable_metadata, after_clear.updatable_metadata + ); + + ensure!(after_clear.immutable_metadata == Some(immutable_metadata)); + ensure!(after_clear.updatable_metadata.is_none()); + + Ok(()) +} diff --git a/examples/audit-trail/04_configure_locking.rs b/examples/audit-trail/04_configure_locking.rs new file mode 100644 index 00000000..c9c71caa --- /dev/null +++ b/examples/audit-trail/04_configure_locking.rs @@ -0,0 +1,149 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates the trail and sets up the LockingAdmin and RecordAdmin roles. +//! - **LockingAdmin**: Controls write and delete locks. Holds the LockingAdmin capability. +//! - **RecordAdmin**: Writes records. Used to demonstrate that the write lock is enforced per-sender, not just checked +//! by the admin. + +use anyhow::{Result, ensure}; +use audit_trail::core::types::{CapabilityIssueOptions, Data, InitialRecord, LockingWindow, PermissionSet, TimeLock}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Delegate locking updates through a `LockingAdmin` role. +/// 2. Freeze record creation with a write lock. +/// 3. Restore writes and add a new record. +/// 4. Update the delete-record window and delete-trail lock. +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail: Configure Locking ===\n"); + + // `admin` creates the trail and manages roles. + // `locking_admin` controls write and delete locks. + // `record_admin` writes records. 
+ let admin = get_funded_audit_trail_client().await?; + let locking_admin = get_funded_audit_trail_client().await?; + let record_admin = get_funded_audit_trail_client().await?; + + let created = admin + .create_trail() + .with_initial_record(InitialRecord::new( + Data::text("Trail opened"), + Some("event:created".to_string()), + None, + )) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail_id = created.trail_id; + + admin + .trail(trail_id) + .access() + .for_role("LockingAdmin") + .create(PermissionSet::locking_admin_permissions(), None) + .build_and_execute(&admin) + .await?; + admin + .trail(trail_id) + .access() + .for_role("LockingAdmin") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(locking_admin.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await?; + + admin + .trail(trail_id) + .access() + .for_role("RecordAdmin") + .create(PermissionSet::record_admin_permissions(), None) + .build_and_execute(&admin) + .await?; + admin + .trail(trail_id) + .access() + .for_role("RecordAdmin") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(record_admin.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await?; + + locking_admin + .trail(trail_id) + .locking() + .update_write_lock(TimeLock::Infinite) + .build_and_execute(&locking_admin) + .await?; + + let locked = admin.trail(trail_id).get().await?; + println!("Write lock after update: {:?}\n", locked.locking_config.write_lock); + ensure!(locked.locking_config.write_lock == TimeLock::Infinite); + + let blocked_add = record_admin + .trail(trail_id) + .records() + .add(Data::text("This write should fail"), None, None) + .build_and_execute(&record_admin) + .await; + ensure!(blocked_add.is_err(), "write lock should block adding records"); + + locking_admin + .trail(trail_id) + .locking() + .update_write_lock(TimeLock::None) + .build_and_execute(&locking_admin) + 
.await?; + + let added = record_admin + .trail(trail_id) + .records() + .add(Data::text("Write lock lifted"), Some("event:resumed".to_string()), None) + .build_and_execute(&record_admin) + .await? + .output; + + println!( + "Added record {} after clearing the write lock.\n", + added.sequence_number + ); + + locking_admin + .trail(trail_id) + .locking() + .update_delete_record_window(LockingWindow::CountBased { count: 2 }) + .build_and_execute(&locking_admin) + .await?; + locking_admin + .trail(trail_id) + .locking() + .update_delete_trail_lock(TimeLock::Infinite) + .build_and_execute(&locking_admin) + .await?; + + let final_state = admin.trail(trail_id).get().await?; + println!( + "Final locking config:\n delete_record_window = {:?}\n delete_trail_lock = {:?}\n write_lock = {:?}", + final_state.locking_config.delete_record_window, + final_state.locking_config.delete_trail_lock, + final_state.locking_config.write_lock + ); + + ensure!(final_state.locking_config.delete_record_window == LockingWindow::CountBased { count: 2 }); + ensure!(final_state.locking_config.delete_trail_lock == TimeLock::Infinite); + ensure!(final_state.locking_config.write_lock == TimeLock::None); + + Ok(()) +} diff --git a/examples/audit-trail/05_manage_access.rs b/examples/audit-trail/05_manage_access.rs new file mode 100644 index 00000000..828db2d4 --- /dev/null +++ b/examples/audit-trail/05_manage_access.rs @@ -0,0 +1,152 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates and updates roles, issues capabilities, revokes and destroys them, and finally deletes the role +//! once it is no longer needed. +//! - **OperationsUser**: The subject of all capability issuance. Capabilities are bound to this address to demonstrate +//! that revocation immediately blocks their access. 
+ +use std::collections::HashSet; + +use anyhow::{Result, ensure}; +use audit_trail::core::types::{CapabilityIssueOptions, Data, Permission, PermissionSet}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Create and update a custom role. +/// 2. Issue a constrained capability for that role. +/// 3. Revoke one capability and destroy another. +/// 4. Remove the role after its capability lifecycle is complete. +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail: Manage Access ===\n"); + + // `admin` manages roles and capability lifecycle. + // `operations_user` represents the actor who receives (and later loses) access. + let admin = get_funded_audit_trail_client().await?; + let operations_user = get_funded_audit_trail_client().await?; + + let created = admin + .create_trail() + .with_initial_record(audit_trail::core::types::InitialRecord::new( + Data::text("Trail created"), + None, + None, + )) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail_id = created.trail_id; + + let created_role = admin + .trail(trail_id) + .access() + .for_role("Operations") + .create(PermissionSet::record_admin_permissions(), None) + .build_and_execute(&admin) + .await? + .output; + println!("Created role: {}\n", created_role.role); + + let updated_permissions = PermissionSet { + permissions: HashSet::from([ + Permission::AddRecord, + Permission::DeleteRecord, + Permission::DeleteAllRecords, + ]), + }; + + let updated_role = admin + .trail(trail_id) + .access() + .for_role("Operations") + .update_permissions(updated_permissions.clone(), None) + .build_and_execute(&admin) + .await? 
+ .output; + println!("Updated role permissions: {:?}\n", updated_role.permissions.permissions); + + let constrained_capability = admin + .trail(trail_id) + .access() + .for_role("Operations") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(operations_user.sender_address()), + valid_from_ms: None, + valid_until_ms: Some(4_102_444_800_000), + }) + .build_and_execute(&admin) + .await? + .output; + + println!( + "Issued constrained capability:\n id = {}\n issued_to = {:?}\n valid_until = {:?}\n", + constrained_capability.capability_id, constrained_capability.issued_to, constrained_capability.valid_until + ); + + let on_chain = admin.trail(trail_id).get().await?; + let role_definition = on_chain.roles.roles.get("Operations").expect("role must exist"); + ensure!(role_definition.permissions == updated_permissions.permissions); + + admin + .trail(trail_id) + .access() + .revoke_capability(constrained_capability.capability_id, constrained_capability.valid_until) + .build_and_execute(&admin) + .await?; + println!("Revoked capability {}\n", constrained_capability.capability_id); + + // destroy_capability consumes the capability object, so the signer must own it. + // The capability is issued to admin so admin can destroy it directly. + let disposable_capability = admin + .trail(trail_id) + .access() + .for_role("Operations") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(admin.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await? 
+ .output; + + admin + .trail(trail_id) + .access() + .destroy_capability(disposable_capability.capability_id) + .build_and_execute(&admin) + .await?; + println!("Destroyed capability {}\n", disposable_capability.capability_id); + + admin + .trail(trail_id) + .access() + .cleanup_revoked_capabilities() + .build_and_execute(&admin) + .await?; + println!("Cleaned up revoked capability registry entries.\n"); + + admin + .trail(trail_id) + .access() + .for_role("Operations") + .delete() + .build_and_execute(&admin) + .await?; + + let after_delete = admin.trail(trail_id).get().await?; + ensure!( + !after_delete.roles.roles.contains_key("Operations"), + "role should be removed from the trail" + ); + + println!("Removed the custom role after its capability lifecycle completed."); + + Ok(()) +} diff --git a/examples/audit-trail/06_delete_records.rs b/examples/audit-trail/06_delete_records.rs new file mode 100644 index 00000000..c92f409f --- /dev/null +++ b/examples/audit-trail/06_delete_records.rs @@ -0,0 +1,114 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates the trail and sets up the RecordMaintenance role. +//! - **RecordMaintainer**: Holds the RecordMaintenance capability. Adds records and then deletes them individually and +//! in batch. + +use std::collections::HashSet; + +use anyhow::{Result, ensure}; +use audit_trail::core::types::{CapabilityIssueOptions, Data, InitialRecord, Permission, PermissionSet}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Create records using a delegated record-maintenance role. +/// 2. Delete a single record by sequence number. +/// 3. Delete the remaining records in one batch. +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail: Delete Records ===\n"); + + // `admin` creates the trail and manages roles. + // `record_maintainer` adds and deletes records. 
+ let admin = get_funded_audit_trail_client().await?; + let record_maintainer = get_funded_audit_trail_client().await?; + + let created = admin + .create_trail() + .with_initial_record(InitialRecord::new( + Data::text("Initial record"), + Some("event:created".to_string()), + None, + )) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail = admin.trail(created.trail_id); + + trail + .access() + .for_role("RecordMaintenance") + .create( + PermissionSet { + permissions: HashSet::from([ + Permission::AddRecord, + Permission::DeleteRecord, + Permission::DeleteAllRecords, + ]), + }, + None, + ) + .build_and_execute(&admin) + .await?; + + trail + .access() + .for_role("RecordMaintenance") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(record_maintainer.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await?; + + let records = record_maintainer.trail(created.trail_id).records(); + + let added_one = records + .add(Data::text("Second record"), Some("event:received".to_string()), None) + .build_and_execute(&record_maintainer) + .await? + .output; + let added_two = records + .add(Data::text("Third record"), Some("event:dispatched".to_string()), None) + .build_and_execute(&record_maintainer) + .await? + .output; + + println!( + "Trail has records at sequence numbers 0, {}, {}\n", + added_one.sequence_number, added_two.sequence_number + ); + ensure!(records.record_count().await? == 3); + + let deleted_one = records + .delete(added_one.sequence_number) + .build_and_execute(&record_maintainer) + .await? + .output; + println!("Deleted record {}\n", deleted_one.sequence_number); + + ensure!(records.record_count().await? == 2); + ensure!( + records.get(added_one.sequence_number).await.is_err(), + "deleted record should no longer be readable" + ); + + let deleted_remaining = records + .delete_records_batch(10) + .build_and_execute(&record_maintainer) + .await? 
+ .output; + + println!("Batch deleted the remaining {deleted_remaining} records."); + ensure!(deleted_remaining == 2); + ensure!(records.record_count().await? == 0); + + Ok(()) +} diff --git a/examples/audit-trail/07_access_read_only_methods.rs b/examples/audit-trail/07_access_read_only_methods.rs new file mode 100644 index 00000000..19151284 --- /dev/null +++ b/examples/audit-trail/07_access_read_only_methods.rs @@ -0,0 +1,119 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates the trail and sets up the RecordAdmin role. +//! - **RecordAdmin**: Adds one follow-up record. All subsequent operations are read-only and can be performed by any +//! address — no capability required. + +use anyhow::{Result, ensure}; +use audit_trail::core::types::{ + CapabilityIssueOptions, Data, ImmutableMetadata, InitialRecord, LockingConfig, LockingWindow, PermissionSet, + TimeLock, +}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Load the full on-chain trail object. +/// 2. Inspect metadata, roles, and locking configuration. +/// 3. Read records individually and through pagination. +/// 4. Query the record-count and lock-status helpers. +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail: Read-Only Inspection ===\n"); + + // `admin` creates the trail and manages roles. + // `record_admin` adds the follow-up record. 
+    let admin = get_funded_audit_trail_client().await?;
+    let record_admin = get_funded_audit_trail_client().await?;
+
+    let created = admin
+        .create_trail()
+        .with_trail_metadata(ImmutableMetadata::new(
+            "Operations Trail".to_string(),
+            Some("Used to inspect read-only accessors".to_string()),
+        ))
+        .with_updatable_metadata("Status: Active")
+        .with_locking_config(LockingConfig {
+            delete_record_window: LockingWindow::CountBased { count: 2 },
+            delete_trail_lock: TimeLock::None,
+            write_lock: TimeLock::None,
+        })
+        .with_initial_record(InitialRecord::new(
+            Data::text("Initial record"),
+            Some("event:created".to_string()),
+            None,
+        ))
+        .finish()
+        .build_and_execute(&admin)
+        .await?
+        .output;
+
+    let trail_id = created.trail_id;
+
+    admin
+        .trail(trail_id)
+        .access()
+        .for_role("RecordAdmin")
+        .create(PermissionSet::record_admin_permissions(), None)
+        .build_and_execute(&admin)
+        .await?;
+    admin
+        .trail(trail_id)
+        .access()
+        .for_role("RecordAdmin")
+        .issue_capability(CapabilityIssueOptions {
+            issued_to: Some(record_admin.sender_address()),
+            valid_from_ms: None,
+            valid_until_ms: None,
+        })
+        .build_and_execute(&admin)
+        .await?;
+
+    record_admin
+        .trail(trail_id)
+        .records()
+        .add(Data::text("Follow-up record"), Some("event:updated".to_string()), None)
+        .build_and_execute(&record_admin)
+        .await?;
+
+    let on_chain = admin.trail(trail_id).get().await?;
+    println!(
+        "Trail summary:\n id = {}\n creator = {}\n created_at = {}\n sequence_number = {}\n immutable_metadata = {:?}\n updatable_metadata = {:?}\n",
+        on_chain.id.object_id(),
+        on_chain.creator,
+        on_chain.created_at,
+        on_chain.sequence_number,
+        on_chain.immutable_metadata,
+        on_chain.updatable_metadata
+    );
+
+    println!(
+        "Roles: {:?}\nLocking config: {:?}\n",
+        on_chain.roles.roles.keys().collect::<Vec<_>>(),
+        on_chain.locking_config
+    );
+
+    let trail = admin.trail(trail_id);
+    let count = trail.records().record_count().await?;
+    let initial_record = trail.records().get(0).await?;
+    let 
first_page = trail.records().list_page(None, 10).await?; + let record_zero_locked = trail.locking().is_record_locked(0).await?; + + println!("Record count: {count}"); + println!("Record #0: {:?}", initial_record); + println!( + "First page size: {} (has_next_page = {})", + first_page.records.len(), + first_page.has_next_page + ); + println!("Is record #0 locked? {record_zero_locked}"); + + ensure!(count == 2); + ensure!(matches!(initial_record.data, Data::Text(ref text) if text == "Initial record")); + ensure!(first_page.records.len() == 2); + + Ok(()) +} diff --git a/examples/audit-trail/08_delete_audit_trail.rs b/examples/audit-trail/08_delete_audit_trail.rs new file mode 100644 index 00000000..52e55e5a --- /dev/null +++ b/examples/audit-trail/08_delete_audit_trail.rs @@ -0,0 +1,101 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates the trail and sets up the MaintenanceAdmin role. +//! - **MaintenanceAdmin**: Holds delete permissions. Attempts (and fails) to delete the non-empty trail, then +//! batch-deletes all records before removing the trail itself. + +use std::collections::HashSet; + +use anyhow::{Result, ensure}; +use audit_trail::core::types::{CapabilityIssueOptions, Data, InitialRecord, Permission, PermissionSet}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Show that a non-empty trail cannot be deleted. +/// 2. Empty the trail with `delete_records_batch`. +/// 3. Delete the trail once its records are gone. +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail: Delete Trail ===\n"); + + // `admin` creates the trail and manages roles. + // `maintenance_admin` empties and deletes the trail. 
+ let admin = get_funded_audit_trail_client().await?; + let maintenance_admin = get_funded_audit_trail_client().await?; + + let created = admin + .create_trail() + .with_initial_record(InitialRecord::new( + Data::text("Initial record"), + Some("event:created".to_string()), + None, + )) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail = admin.trail(created.trail_id); + + trail + .access() + .for_role("MaintenanceAdmin") + .create( + PermissionSet { + permissions: HashSet::from([Permission::DeleteAllRecords, Permission::DeleteAuditTrail]), + }, + None, + ) + .build_and_execute(&admin) + .await?; + trail + .access() + .for_role("MaintenanceAdmin") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(maintenance_admin.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await?; + + let maintenance_trail = maintenance_admin.trail(created.trail_id); + + let delete_while_non_empty = maintenance_trail + .delete_audit_trail() + .build_and_execute(&maintenance_admin) + .await; + ensure!(delete_while_non_empty.is_err(), "a trail must be empty before deletion"); + println!("Deleting the non-empty trail failed as expected.\n"); + + let deleted_records = maintenance_trail + .records() + .delete_records_batch(10) + .build_and_execute(&maintenance_admin) + .await? + .output; + println!("Deleted {deleted_records} record(s) before trail removal.\n"); + + ensure!(maintenance_trail.records().record_count().await? == 0); + + let deleted_trail = maintenance_trail + .delete_audit_trail() + .build_and_execute(&maintenance_admin) + .await? 
+ .output; + println!( + "Trail deleted:\n trail_id = {}\n timestamp = {}", + deleted_trail.trail_id, deleted_trail.timestamp + ); + + ensure!( + maintenance_trail.get().await.is_err(), + "deleted trail should no longer be readable" + ); + + Ok(()) +} diff --git a/examples/audit-trail/README.md b/examples/audit-trail/README.md new file mode 100644 index 00000000..23a0be5a --- /dev/null +++ b/examples/audit-trail/README.md @@ -0,0 +1,169 @@ +# IOTA Audit Trail Examples + +The following code examples demonstrate how to use IOTA Audit Trails for creating structured, role-based audit logs on the IOTA network. + +## Prerequisites + +Examples can be run against: + +- A local IOTA node +- An existing network, e.g., the IOTA testnet + +When setting up a local node, you'll need to publish an audit trail package as described in the IOTA documentation. You'll also need to provide environment variables for your locally deployed audit trail package to run the examples against the local node. + +If running the examples on `testnet`, use the appropriate package IDs for the testnet deployment. + +In case of running the examples against an existing network, this network needs to have a faucet to fund your accounts (the IOTA testnet (`https://api.testnet.iota.cafe`) supports this), and you need to specify this via `API_ENDPOINT`. + +## Environment Variables + +You'll need one or more of the following environment variables depending on your setup: + +| Name | Required for local node | Required for testnet | Required for other node | +| ------------------------- | :---------------------: | :------------------: | :---------------------: | +| IOTA_AUDIT_TRAIL_PKG_ID | x | x | x | +| IOTA_TF_COMPONENTS_PKG_ID | x | | | +| API_ENDPOINT | | x | x | + +> **Note:** On localnet both `IOTA_AUDIT_TRAIL_PKG_ID` and `IOTA_TF_COMPONENTS_PKG_ID` resolve to the same package ID because the TfComponents dependency is published together with the audit trail package. 
+
+## Running Examples
+
+The publish script prints the required `export` statements, so use `eval` to set the variables in one step:
+
+```bash
+eval $(./audit-trail-move/scripts/publish_package.sh)
+```
+
+Then run a specific example:
+
+```bash
+cargo run --release --example <example_name>
+```
+
+For instance, to run the `01_create_audit_trail` example:
+
+```bash
+eval $(./audit-trail-move/scripts/publish_package.sh)
+cargo run --release --example 01_create_audit_trail
+```
+
+To pass the variables inline instead:
+
+```bash
+IOTA_AUDIT_TRAIL_PKG_ID=0x... IOTA_TF_COMPONENTS_PKG_ID=0x... cargo run --release --example 01_create_audit_trail
+```
+
+## Examples
+
+| Name | Information |
+| :--- | :--- |
+| [01_create_audit_trail](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/01_create_audit_trail.rs) | Creates an audit trail, defines a `RecordAdmin` role using the Admin capability, and issues a capability for it. |
+| [02_add_and_read_records](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/02_add_and_read_records.rs) | Adds follow-up records to a trail, then loads them back individually and through paginated reads. |
+| [03_update_metadata](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/03_update_metadata.rs) | Updates and clears the trail's mutable metadata while preserving immutable metadata. |
+| [04_configure_locking](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/04_configure_locking.rs) | Configures write and delete locks, then shows how those rules affect record creation. 
| +| [05_manage_access](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/05_manage_access.rs) | Creates and updates a role, then demonstrates constrained capability issuance, revoke and destroy flows, denylist cleanup, and final role removal. | +| [06_delete_records](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/06_delete_records.rs) | Deletes an individual record and then removes the remaining records in a batch. | +| [07_access_read_only_methods](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/07_access_read_only_methods.rs) | Reads back trail metadata, locking state, record counts, and paginated record data. | +| [08_delete_audit_trail](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/08_delete_audit_trail.rs) | Empties a trail and then deletes it, showing that non-empty trails cannot be removed. | + +## Advanced Examples + +| Name | Information | +| :------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------- | +| [09_tagged_records](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/advanced/09_tagged_records.rs) | Uses role tags and address-bound capabilities to restrict who may add tagged records. | +| [10_capability_constraints](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/advanced/10_capability_constraints.rs) | Shows address-bound capability use and how revocation immediately blocks future writes. | +| [11_manage_record_tags](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/advanced/11_manage_record_tags.rs) | Delegates record-tag administration and shows that in-use tags cannot be removed. 
| + +## Real-World Examples + +| Name | Information | +| :----------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [01_customs_clearance](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/real-world/01_customs_clearance.rs) | Models customs clearance with role-tag restrictions, delegated capabilities, denied inspection writes, and a final write lock. | +| [02_clinical_trial](https://github.com/iotaledger/notarization/tree/main/examples/audit-trail/real-world/02_clinical_trial.rs) | Models a Phase III clinical trial with time-constrained capabilities, mid-study tag additions, deletion-window enforcement, time-locked datasets, and read-only regulator verification. | + +## Key Concepts + +### Audit Trail + +An audit trail is an on-chain object that stores an ordered sequence of records. 
Each trail has: + +- **Immutable metadata**: Name and description set at creation, never changes +- **Updatable metadata**: A mutable string for operational status or notes +- **Record log**: An append-only sequence of records (text or binary data) +- **Role map**: Named roles with permission sets that control who can do what +- **Locking config**: Optional write, delete-record, and delete-trail locks + +### Role-Based Access Control + +Access to trail operations is controlled via roles and capabilities: + +- **Roles** define a named set of permissions (e.g., `RecordAdmin` with `AddRecord`, `DeleteRecord`, `CorrectRecord`) +- **Capabilities** are on-chain objects issued for a role and held in a wallet — possession of a capability grants the associated permissions on a specific trail +- The trail creator automatically receives an **Admin** capability granting full administrative control (role management, capability issuance, tag management, etc.) + +### Permission Sets + +`PermissionSet` convenience constructors cover common role configurations: + +| Constructor | Permissions granted | +| :----------------------------- | :------------------------------------------------------------------------------------------------------- | +| `admin_permissions()` | AddRoles, UpdateRoles, DeleteRoles, AddCapabilities, RevokeCapabilities, AddRecordTags, DeleteRecordTags | +| `record_admin_permissions()` | AddRecord, DeleteRecord, CorrectRecord | +| `locking_admin_permissions()` | UpdateLockingConfig (and all sub-variants) | +| `cap_admin_permissions()` | AddCapabilities, RevokeCapabilities | +| `tag_admin_permissions()` | AddRecordTags, DeleteRecordTags | +| `metadata_admin_permissions()` | UpdateMetadata, DeleteMetadata | + +### Capability Constraints + +When issuing a capability, `CapabilityIssueOptions` allows restricting its use: + +- **`issued_to`**: Bind the capability to a specific wallet address +- **`valid_from_ms`**: The capability is not valid before this Unix timestamp 
(ms) +- **`valid_until_ms`**: The capability expires after this Unix timestamp (ms) + +### Locking + +Trails support three independent lock dimensions: + +- **Write lock** (`TimeLock`): Prevents new records from being added +- **Delete-record window** (`LockingWindow`): Time-based or count-based window during which a record can be deleted after creation +- **Delete-trail lock** (`TimeLock`): Prevents the trail itself from being destroyed + +`TimeLock` variants: `None`, `UnlockAt(u32)`, `UnlockAtMs(u64)`, `UntilDestroyed`, `Infinite`. + +## Example Scenarios + +### Audit Log Workflow + +1. **Create** a trail with immutable metadata and an initial record +2. **Define roles** (e.g., `RecordAdmin`, `Auditor`) using the Admin capability +3. **Issue capabilities** to operators or auditors +4. **Add records** using a RecordAdmin capability +5. **Query** records and trail state at any time + +### Compliance Use Cases + +- **Locked write windows** to prevent retroactive record insertion +- **Delete-record windows** to allow corrections within a time limit, then freeze +- **Role separation** to enforce least-privilege access (auditors can read, operators can write) +- **Bound capabilities** to tie a capability to a specific operator address + +## Best Practices + +1. **Separate roles by responsibility**: Use distinct roles for writing records, managing locking, and administering capabilities +2. **Bind capabilities to addresses**: Use `issued_to` to prevent capability sharing +3. **Set validity windows**: Use `valid_from_ms` / `valid_until_ms` to limit capability lifetime +4. **Use record tags**: Define a tag registry on the trail and restrict roles to specific tags for finer-grained access control +5. 
**Plan locking upfront**: Locking configuration is easier to set at creation than to change later + +## Security Considerations + +- Audit trails and their records are publicly readable on the blockchain +- Private keys control which capabilities a wallet holds +- Bound capabilities (`issued_to`) prevent transfer and unauthorized use +- Delete-trail locks ensure data retention requirements are met +- Revoking a capability adds it to the trail's revoked-capability registry, blocking future use + +For more detailed information about IOTA Audit Trail concepts and advanced usage, refer to the official IOTA documentation. diff --git a/examples/audit-trail/advanced/09_tagged_records.rs b/examples/audit-trail/advanced/09_tagged_records.rs new file mode 100644 index 00000000..4dcf243c --- /dev/null +++ b/examples/audit-trail/advanced/09_tagged_records.rs @@ -0,0 +1,115 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates the trail, defines the FinanceWriter role restricted to the `finance` tag, and issues a +//! capability bound to `finance_writer`'s address. +//! - **FinanceWriter**: Holds the address-bound capability. Can add `finance`-tagged records but is blocked from +//! writing `legal`-tagged records. + +use anyhow::{Result, ensure}; +use audit_trail::core::types::{CapabilityIssueOptions, Data, InitialRecord, Permission, RoleTags}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Create a trail with a predefined tag registry. +/// 2. Define a role that is restricted to one record tag. +/// 3. Issue a capability bound to a specific wallet address. +/// 4. Show that the holder can add only records matching the allowed tag. 
+#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail Advanced: Tagged Records ===\n"); + + let admin = get_funded_audit_trail_client().await?; + let finance_writer = get_funded_audit_trail_client().await?; + + let created = admin + .create_trail() + .with_record_tags(["finance", "legal"]) + .with_initial_record(InitialRecord::new( + Data::text("Trail created"), + Some("event:created".to_string()), + None, + )) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail_id = created.trail_id; + + admin + .trail(trail_id) + .access() + .for_role("FinanceWriter") + .create( + audit_trail::core::types::PermissionSet { + permissions: [Permission::AddRecord].into_iter().collect(), + }, + Some(RoleTags::new(["finance"])), + ) + .build_and_execute(&admin) + .await?; + + let issued = admin + .trail(trail_id) + .access() + .for_role("FinanceWriter") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(finance_writer.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await? + .output; + + println!( + "Issued FinanceWriter capability {} to {}\n", + issued.capability_id, + finance_writer.sender_address() + ); + + // The client automatically scans `finance_writer`'s wallet for a capability object that + // targets this trail and carries the required permission. No explicit capability ID is + // needed — the lookup happens in the background on every operation. + let finance_records = finance_writer.trail(trail_id).records(); + + let added = finance_records + .add( + Data::text("Invoice approved"), + Some("department:finance".to_string()), + Some("finance".to_string()), + ) + .build_and_execute(&finance_writer) + .await? 
+ .output; + + println!( + "Added tagged record at sequence number {} with tag \"finance\".\n", + added.sequence_number + ); + + let wrong_tag = finance_records + .add( + Data::text("Legal review completed"), + Some("department:legal".to_string()), + Some("legal".to_string()), + ) + .build_and_execute(&finance_writer) + .await; + + ensure!( + wrong_tag.is_err(), + "a finance-scoped role must not add a legal-tagged record" + ); + + let finance_record = finance_records.get(added.sequence_number).await?; + println!("Stored tagged record: {:?}", finance_record); + + ensure!(finance_record.tag.as_deref() == Some("finance")); + + Ok(()) +} diff --git a/examples/audit-trail/advanced/10_capability_constraints.rs b/examples/audit-trail/advanced/10_capability_constraints.rs new file mode 100644 index 00000000..d56db61f --- /dev/null +++ b/examples/audit-trail/advanced/10_capability_constraints.rs @@ -0,0 +1,114 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates the trail, defines the RecordAdmin role, and issues a capability bound specifically to +//! `intended_writer`'s address. Also performs revocation. +//! - **IntendedWriter**: The authorised holder. Writes a record successfully before revocation, then is blocked after +//! the capability is revoked. +//! - **WrongWriter**: An unauthorised actor who attempts to use the address-bound capability. All write attempts are +//! rejected by the Move contract. + +use anyhow::{Result, ensure}; +use audit_trail::core::types::{CapabilityIssueOptions, Data, InitialRecord, PermissionSet}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Bind a capability to a specific wallet address. +/// 2. Show that a different wallet cannot use it. +/// 3. Revoke the capability and confirm the bound holder can no longer use it. 
+#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail Advanced: Capability Constraints ===\n"); + + let admin = get_funded_audit_trail_client().await?; + let intended_writer = get_funded_audit_trail_client().await?; + let wrong_writer = get_funded_audit_trail_client().await?; + + let created = admin + .create_trail() + .with_initial_record(InitialRecord::new(Data::text("Trail created"), None, None)) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail_id = created.trail_id; + + admin + .trail(trail_id) + .access() + .for_role("RecordAdmin") + .create(PermissionSet::record_admin_permissions(), None) + .build_and_execute(&admin) + .await?; + + let issued = admin + .trail(trail_id) + .access() + .for_role("RecordAdmin") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(intended_writer.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await? + .output; + + println!( + "Issued capability {} to {}\n", + issued.capability_id, + intended_writer.sender_address() + ); + + let denied = wrong_writer + .trail(trail_id) + .records() + .add(Data::text("Wrong writer"), None, None) + .build_and_execute(&wrong_writer) + .await; + + ensure!( + denied.is_err(), + "a capability bound to another address must not be usable" + ); + + let added = intended_writer + .trail(trail_id) + .records() + .add(Data::text("Authorized writer"), None, None) + .build_and_execute(&intended_writer) + .await? 
+ .output; + + println!("Bound holder added record {} successfully.\n", added.sequence_number); + + admin + .trail(trail_id) + .access() + .revoke_capability(issued.capability_id, issued.valid_until) + .build_and_execute(&admin) + .await?; + + let revoked_attempt = intended_writer + .trail(trail_id) + .records() + .add(Data::text("Should fail after revoke"), None, None) + .build_and_execute(&intended_writer) + .await; + + ensure!( + revoked_attempt.is_err(), + "revoked capabilities must no longer authorize record writes" + ); + + println!( + "Revoked capability {} and verified it can no longer be used.", + issued.capability_id + ); + + Ok(()) +} diff --git a/examples/audit-trail/advanced/11_manage_record_tags.rs b/examples/audit-trail/advanced/11_manage_record_tags.rs new file mode 100644 index 00000000..d8c52def --- /dev/null +++ b/examples/audit-trail/advanced/11_manage_record_tags.rs @@ -0,0 +1,127 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! ## Actors +//! +//! - **Admin**: Creates the trail and manages roles. +//! - **TagAdmin**: Holds the TagAdmin capability. Adds and removes entries from the trail's tag registry. +//! - **FinanceWriter**: Holds a `finance`-scoped RecordAdmin capability. Writes a `finance`-tagged record that keeps +//! the `finance` tag in use and therefore unremovable. + +use anyhow::{Result, ensure}; +use audit_trail::core::types::{CapabilityIssueOptions, Data, InitialRecord, PermissionSet, RoleTags}; +use examples::get_funded_audit_trail_client; +use product_common::core_client::CoreClient; + +/// Demonstrates how to: +/// 1. Delegate record-tag registry management to a `TagAdmin` role. +/// 2. Add and remove tags from the trail registry. +/// 3. Show that tags still in use by roles or records cannot be removed. +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Audit Trail Advanced: Manage Record Tags ===\n"); + + // `admin` creates the trail and manages roles. 
+ // `tag_admin` adds and removes tags from the registry. + // `finance_writer` holds a tag-scoped capability and writes finance records. + let admin = get_funded_audit_trail_client().await?; + let tag_admin = get_funded_audit_trail_client().await?; + let finance_writer = get_funded_audit_trail_client().await?; + + let created = admin + .create_trail() + .with_record_tags(["finance"]) + .with_initial_record(InitialRecord::new(Data::text("Trail created"), None, None)) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail_id = created.trail_id; + + admin + .trail(trail_id) + .access() + .for_role("TagAdmin") + .create(PermissionSet::tag_admin_permissions(), None) + .build_and_execute(&admin) + .await?; + admin + .trail(trail_id) + .access() + .for_role("TagAdmin") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(tag_admin.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await?; + + tag_admin + .trail(trail_id) + .tags() + .add("legal") + .build_and_execute(&tag_admin) + .await?; + + let after_add = admin.trail(trail_id).get().await?; + println!("Registry after adding \"legal\": {:?}\n", after_add.tags.tag_map); + ensure!(after_add.tags.contains_key("finance")); + ensure!(after_add.tags.contains_key("legal")); + + admin + .trail(trail_id) + .access() + .for_role("FinanceWriter") + .create( + PermissionSet::record_admin_permissions(), + Some(RoleTags::new(["finance"])), + ) + .build_and_execute(&admin) + .await?; + admin + .trail(trail_id) + .access() + .for_role("FinanceWriter") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(finance_writer.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await?; + + finance_writer + .trail(trail_id) + .records() + .add(Data::text("Tagged finance entry"), None, Some("finance".to_string())) + .build_and_execute(&finance_writer) + .await?; + + let remove_finance = tag_admin + 
.trail(trail_id) + .tags() + .remove("finance") + .build_and_execute(&tag_admin) + .await; + ensure!( + remove_finance.is_err(), + "a tag referenced by a role or record must not be removable" + ); + + tag_admin + .trail(trail_id) + .tags() + .remove("legal") + .build_and_execute(&tag_admin) + .await?; + + let after_remove = admin.trail(trail_id).get().await?; + println!("Registry after removing \"legal\": {:?}\n", after_remove.tags.tag_map); + + ensure!(after_remove.tags.contains_key("finance")); + ensure!(!after_remove.tags.contains_key("legal")); + + Ok(()) +} diff --git a/examples/audit-trail/real-world/01_customs_clearance.rs b/examples/audit-trail/real-world/01_customs_clearance.rs new file mode 100644 index 00000000..6c651fa9 --- /dev/null +++ b/examples/audit-trail/real-world/01_customs_clearance.rs @@ -0,0 +1,368 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! # Customs Clearance Example +//! +//! This example models a customs-clearance process for a single shipment. +//! +//! ## Actors +//! +//! - **Admin**: Creates the trail and sets up all roles and capabilities. +//! - **DocsOperator**: Handles document submission (invoices, packing lists). Writes only `documents`-tagged records. +//! - **ExportBroker**: Files export declarations and records clearance decisions at the origin. Writes only +//! `export`-tagged records. +//! - **ImportBroker**: Handles duty assessment and import clearance at the destination. Writes only `import`-tagged +//! records. +//! - **Inspector**: Records the outcome of a customs physical inspection. Writes only `inspection`-tagged records; the +//! role is created mid-process when an inspection is triggered. +//! - **Supervisor**: Updates the mutable trail metadata (processing status). No record-write permissions. +//! - **LockingAdmin**: Freezes the trail once the shipment is fully cleared. +//! +//! ## How the trail is used +//! +//! 
- `immutable_metadata`: shipment and declaration identity +//! - `updatable_metadata`: the current customs-processing status +//! - record tags: `documents`, `export`, `import`, and `inspection` +//! - roles and capabilities: each operational role writes only the events it owns +//! - locking: writes are frozen once the shipment is fully cleared + +use anyhow::{Result, ensure}; +use audit_trail::AuditTrailClient; +use audit_trail::core::types::{ + CapabilityIssueOptions, Data, ImmutableMetadata, InitialRecord, LockingConfig, LockingWindow, PermissionSet, + RoleTags, TimeLock, +}; +use examples::get_funded_audit_trail_client; +use iota_sdk::types::base_types::{IotaAddress, ObjectID}; +use product_common::core_client::CoreClient; +use product_common::test_utils::InMemSigner; +use sha2::{Digest, Sha256}; + +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Customs Clearance ===\n"); + + let admin = get_funded_audit_trail_client().await?; + let docs_operator = get_funded_audit_trail_client().await?; + let export_broker = get_funded_audit_trail_client().await?; + let import_broker = get_funded_audit_trail_client().await?; + let supervisor = get_funded_audit_trail_client().await?; + let locking_admin = get_funded_audit_trail_client().await?; + let inspector = get_funded_audit_trail_client().await?; + + // === Create the customs-clearance trail === + + println!("Creating a customs-clearance trail..."); + + let created = admin + .create_trail() + .with_record_tags(["documents", "export", "import", "inspection"]) + .with_trail_metadata(ImmutableMetadata::new( + "Shipment SHP-2026-CLEAR-001".to_string(), + Some("Route: Hamburg, Germany -> Nairobi, Kenya | Declaration: DEC-2026-44017".to_string()), + )) + .with_updatable_metadata("Status: Documents Pending") + .with_locking_config(LockingConfig { + delete_record_window: LockingWindow::CountBased { count: 2 }, + delete_trail_lock: TimeLock::None, + write_lock: TimeLock::None, + }) + 
.with_initial_record(InitialRecord::new( + Data::text("Customs clearance case opened for inbound shipment"), + Some("event:case_opened".to_string()), + Some("documents".to_string()), + )) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail_id = created.trail_id; + + // === Set up roles and capabilities for each actor === + + issue_tagged_record_role( + &admin, + trail_id, + "DocsOperator", + "documents", + docs_operator.sender_address(), + ) + .await?; + issue_tagged_record_role( + &admin, + trail_id, + "ExportBroker", + "export", + export_broker.sender_address(), + ) + .await?; + issue_tagged_record_role( + &admin, + trail_id, + "ImportBroker", + "import", + import_broker.sender_address(), + ) + .await?; + + admin + .trail(trail_id) + .access() + .for_role("Supervisor") + .create(PermissionSet::metadata_admin_permissions(), None) + .build_and_execute(&admin) + .await?; + admin + .trail(trail_id) + .access() + .for_role("Supervisor") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(supervisor.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await?; + + admin + .trail(trail_id) + .access() + .for_role("LockingAdmin") + .create(PermissionSet::locking_admin_permissions(), None) + .build_and_execute(&admin) + .await?; + admin + .trail(trail_id) + .access() + .for_role("LockingAdmin") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(locking_admin.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await?; + + // === Document submission === + + // Documents are stored off-chain in an access-controlled environment (e.g. a TWIN node). + // Only the SHA-256 fingerprint is committed on-chain for tamper-evidence. 
+ let invoice_hash = Sha256::digest(b"invoice-SHP-2026-CLEAR-001-v1.pdf"); + let docs_uploaded = docs_operator + .trail(trail_id) + .records() + .add( + Data::bytes(invoice_hash.to_vec()), + Some("event:documents_uploaded".to_string()), + Some("documents".to_string()), + ) + .build_and_execute(&docs_operator) + .await? + .output; + + println!("Docs operator added record #{}.\n", docs_uploaded.sequence_number); + + supervisor + .trail(trail_id) + .update_metadata(Some("Status: Awaiting Export Clearance".to_string())) + .build_and_execute(&supervisor) + .await?; + + // === Export clearance === + + let export_filed = export_broker + .trail(trail_id) + .records() + .add( + Data::text("Export declaration filed with German customs"), + Some("event:export_declaration_filed".to_string()), + Some("export".to_string()), + ) + .build_and_execute(&export_broker) + .await? + .output; + + let export_cleared = export_broker + .trail(trail_id) + .records() + .add( + Data::text("Export clearance granted by Hamburg customs office"), + Some("event:export_cleared".to_string()), + Some("export".to_string()), + ) + .build_and_execute(&export_broker) + .await? + .output; + + println!( + "Export broker added records #{} and #{}.\n", + export_filed.sequence_number, export_cleared.sequence_number + ); + + supervisor + .trail(trail_id) + .update_metadata(Some("Status: Awaiting Import Clearance".to_string())) + .build_and_execute(&supervisor) + .await?; + + // === Inspection gate === + + // The import broker does not hold an inspection-scoped capability at this point. + // The write attempt must fail to prove that tag-based access control is enforced. 
+ let denied_inspection = import_broker + .trail(trail_id) + .records() + .add( + Data::text("Import broker attempted to record an inspection result"), + Some("event:invalid_inspection_write".to_string()), + Some("inspection".to_string()), + ) + .build_and_execute(&import_broker) + .await; + + ensure!( + denied_inspection.is_err(), + "inspection-tagged writes should fail before an inspection-scoped capability exists" + ); + println!("Inspection write was correctly denied before the inspector role existed.\n"); + + // A customs inspection is triggered; the inspector role is created and issued mid-process. + issue_tagged_record_role(&admin, trail_id, "Inspector", "inspection", inspector.sender_address()).await?; + + let inspection_done = inspector + .trail(trail_id) + .records() + .add( + Data::text("Customs inspection completed with no discrepancies"), + Some("event:inspection_completed".to_string()), + Some("inspection".to_string()), + ) + .build_and_execute(&inspector) + .await? + .output; + + println!("Inspector added record #{}.\n", inspection_done.sequence_number); + + // === Import clearance === + + let duty_assessed = import_broker + .trail(trail_id) + .records() + .add( + Data::text("Import duty assessed and paid"), + Some("event:duty_assessed".to_string()), + Some("import".to_string()), + ) + .build_and_execute(&import_broker) + .await? + .output; + + let import_cleared = import_broker + .trail(trail_id) + .records() + .add( + Data::text("Import clearance granted by Nairobi customs"), + Some("event:import_cleared".to_string()), + Some("import".to_string()), + ) + .build_and_execute(&import_broker) + .await? 
+ .output; + + println!( + "Import broker added records #{} and #{}.\n", + duty_assessed.sequence_number, import_cleared.sequence_number + ); + + supervisor + .trail(trail_id) + .update_metadata(Some("Status: Cleared".to_string())) + .build_and_execute(&supervisor) + .await?; + + // === Final lock and verification === + + locking_admin + .trail(trail_id) + .locking() + .update_write_lock(TimeLock::Infinite) + .build_and_execute(&locking_admin) + .await?; + + let after_lock = admin.trail(trail_id).get().await?; + println!( + "Write lock after clearance: {:?}\n", + after_lock.locking_config.write_lock + ); + + let late_note = docs_operator + .trail(trail_id) + .records() + .add( + Data::text("Late customs note after the case was closed"), + Some("event:late_note".to_string()), + Some("documents".to_string()), + ) + .build_and_execute(&docs_operator) + .await; + + ensure!( + late_note.is_err(), + "cleared customs trail should reject late writes after the final lock" + ); + + let trail = admin.trail(trail_id); + let first_page = trail.records().list_page(None, 20).await?; + + println!("Recorded customs events:"); + for (sequence_number, record) in &first_page.records { + println!( + " #{} | {:?} | tag={:?} | {:?}", + sequence_number, record.data, record.tag, record.metadata + ); + } + + ensure!( + first_page.records.len() == 7, + "expected 7 customs records including the initial case-opened record" + ); + ensure!( + trail.get().await?.updatable_metadata.as_deref() == Some("Status: Cleared"), + "customs case should finish in cleared state" + ); + + println!("\nCustoms clearance completed successfully."); + + Ok(()) +} + +async fn issue_tagged_record_role( + client: &AuditTrailClient, + trail_id: ObjectID, + role_name: &str, + tag: &str, + issued_to: IotaAddress, +) -> Result<()> { + client + .trail(trail_id) + .access() + .for_role(role_name) + .create(PermissionSet::record_admin_permissions(), Some(RoleTags::new([tag]))) + .build_and_execute(client) + .await?; + + 
client + .trail(trail_id) + .access() + .for_role(role_name) + .issue_capability(CapabilityIssueOptions { + issued_to: Some(issued_to), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(client) + .await?; + + Ok(()) +} diff --git a/examples/audit-trail/real-world/02_clinical_trial.rs b/examples/audit-trail/real-world/02_clinical_trial.rs new file mode 100644 index 00000000..ec5ca145 --- /dev/null +++ b/examples/audit-trail/real-world/02_clinical_trial.rs @@ -0,0 +1,390 @@ +// Copyright 2020-2026 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +//! # Clinical Trial Data-Integrity Example +//! +//! This example models a Phase III clinical trial where an immutable audit trail +//! guarantees data integrity, role-scoped access, and time-constrained oversight. +//! +//! ## Actors +//! +//! - **Admin**: Creates the trail and sets up all roles and capabilities. +//! - **Enroller**: Writes enrollment events. Restricted to the `enrollment` tag. +//! - **SafetyOfficer**: Records adverse events and safety observations. Restricted to `safety`. +//! - **EfficacyReviewer**: Records treatment outcomes. Restricted to `efficacy`. +//! - **PkAnalyst**: Records pharmacokinetic results. Restricted to the `pk` tag that is added mid-study when a PK +//! sub-study is initiated. +//! - **Monitor**: Updates the mutable study-phase metadata. Access is time-windowed to the active study period (90 days +//! from now). +//! - **DataSafetyBoard**: Controls write and delete locks. Freezes the dataset after review. +//! - **Regulator**: Read-only verifier. In production this would use `AuditTrailClientReadOnly` (no signing key); here +//! a funded client is used to keep the example self-contained. +//! +//! ## How the trail is used +//! +//! - `immutable_metadata`: protocol identity and study description +//! - `updatable_metadata`: current study phase (updated as the trial progresses) +//! 
- record tags: `enrollment`, `safety`, `efficacy`, `pk` (added mid-study) +//! - roles and capabilities: each role writes only its designated tag +//! - time-constrained capabilities: Monitor access is windowed to the study period +//! - locking: a deletion window protects recent records; a time-lock freezes the dataset after the Data Safety Board +//! completes its review +//! - read-only verification: a regulator inspects the trail without write access + +use anyhow::{Result, ensure}; +use audit_trail::AuditTrailClient; +use audit_trail::core::types::{ + CapabilityIssueOptions, Data, ImmutableMetadata, InitialRecord, LockingConfig, LockingWindow, PermissionSet, + RoleTags, TimeLock, +}; +use examples::get_funded_audit_trail_client; +use iota_sdk::types::base_types::{IotaAddress, ObjectID}; +use product_common::core_client::CoreClient; +use product_common::test_utils::InMemSigner; + +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Clinical Trial Data Integrity ===\n"); + + let admin = get_funded_audit_trail_client().await?; + let enroller = get_funded_audit_trail_client().await?; + let safety_officer = get_funded_audit_trail_client().await?; + let efficacy_reviewer = get_funded_audit_trail_client().await?; + let pk_analyst = get_funded_audit_trail_client().await?; + let monitor = get_funded_audit_trail_client().await?; + let data_safety_board = get_funded_audit_trail_client().await?; + let regulator = get_funded_audit_trail_client().await?; + + // ----------------------------------------------------------------------- + // 1. 
Create the trial trail + // ----------------------------------------------------------------------- + println!("Creating the clinical-trial audit trail..."); + + let created = admin + .create_trail() + .with_record_tags(["enrollment", "safety", "efficacy"]) + .with_trail_metadata(ImmutableMetadata::new( + "Protocol CTR-2026-03742".to_string(), + Some("Phase III: Efficacy of Drug X vs Placebo in Moderate-to-Severe Asthma".to_string()), + )) + .with_updatable_metadata("Phase: Enrollment") + .with_locking_config(LockingConfig { + delete_record_window: LockingWindow::CountBased { count: 3 }, + delete_trail_lock: TimeLock::None, + write_lock: TimeLock::None, + }) + .with_initial_record(InitialRecord::new( + Data::text("Clinical trial CTR-2026-03742 opened for enrollment"), + Some("event:trial_opened".to_string()), + Some("enrollment".to_string()), + )) + .finish() + .build_and_execute(&admin) + .await? + .output; + + let trail_id = created.trail_id; + println!("Trail created with ID {trail_id}\n"); + + // ----------------------------------------------------------------------- + // 2. Define roles with tag-scoped permissions + // ----------------------------------------------------------------------- + println!("Defining study roles..."); + + issue_tagged_record_role(&admin, trail_id, "Enroller", "enrollment", enroller.sender_address()).await?; + issue_tagged_record_role( + &admin, + trail_id, + "SafetyOfficer", + "safety", + safety_officer.sender_address(), + ) + .await?; + issue_tagged_record_role( + &admin, + trail_id, + "EfficacyReviewer", + "efficacy", + efficacy_reviewer.sender_address(), + ) + .await?; + + // Monitor can update metadata (study phase) but only during the study window. + admin + .trail(trail_id) + .access() + .for_role("Monitor") + .create(PermissionSet::metadata_admin_permissions(), None) + .build_and_execute(&admin) + .await?; + + let now_ms = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH)? 
+ .as_millis() as u64; + // Monitor access is valid for 90 days from now. + let study_end_ms = now_ms + 90 * 24 * 60 * 60 * 1000; + + admin + .trail(trail_id) + .access() + .for_role("Monitor") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(monitor.sender_address()), + valid_from_ms: Some(now_ms), + valid_until_ms: Some(study_end_ms), + }) + .build_and_execute(&admin) + .await?; + + println!("Monitor capability issued (valid for 90 days from now, ends at timestamp {study_end_ms})\n"); + + // Data Safety Board can manage locking. + admin + .trail(trail_id) + .access() + .for_role("DataSafetyBoard") + .create(PermissionSet::locking_admin_permissions(), None) + .build_and_execute(&admin) + .await?; + admin + .trail(trail_id) + .access() + .for_role("DataSafetyBoard") + .issue_capability(CapabilityIssueOptions { + issued_to: Some(data_safety_board.sender_address()), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(&admin) + .await?; + + // ----------------------------------------------------------------------- + // 3. Enrollment phase — add enrollment records + // ----------------------------------------------------------------------- + println!("--- Enrollment Phase ---"); + + let enrolled = enroller + .trail(trail_id) + .records() + .add( + Data::text("Patient P-101 enrolled at Site Hamburg"), + Some("event:patient_enrolled".to_string()), + Some("enrollment".to_string()), + ) + .build_and_execute(&enroller) + .await? + .output; + println!("Enroller added record #{}.\n", enrolled.sequence_number); + + // ----------------------------------------------------------------------- + // 4. 
Add safety and efficacy records + // ----------------------------------------------------------------------- + println!("--- Study Data Collection ---"); + + let safety_event = safety_officer + .trail(trail_id) + .records() + .add( + Data::text("Adverse event: mild headache reported by Patient P-101"), + Some("event:adverse_event".to_string()), + Some("safety".to_string()), + ) + .build_and_execute(&safety_officer) + .await? + .output; + + let efficacy_record = efficacy_reviewer + .trail(trail_id) + .records() + .add( + Data::text("Week 12: FEV1 improvement of 320 mL over baseline for P-101"), + Some("event:efficacy_observed".to_string()), + Some("efficacy".to_string()), + ) + .build_and_execute(&efficacy_reviewer) + .await? + .output; + + println!( + "SafetyOfficer added record #{}, EfficacyReviewer added record #{}.\n", + safety_event.sequence_number, efficacy_record.sequence_number + ); + + // ----------------------------------------------------------------------- + // 5. Add a new tag mid-study (pharmacokinetics) + // ----------------------------------------------------------------------- + println!("--- Mid-Study Amendment ---"); + + // Admin adds the new tag and creates a role for the PK analyst. + admin.trail(trail_id).tags().add("pk").build_and_execute(&admin).await?; + println!("Added tag 'pk' (pharmacokinetics) to the trail."); + + issue_tagged_record_role(&admin, trail_id, "PkAnalyst", "pk", pk_analyst.sender_address()).await?; + + let pk_record = pk_analyst + .trail(trail_id) + .records() + .add( + Data::text("PK analysis: Cmax reached at 2.4 h, half-life 8.7 h"), + Some("event:pk_result".to_string()), + Some("pk".to_string()), + ) + .build_and_execute(&pk_analyst) + .await? + .output; + println!("PkAnalyst added record #{}.\n", pk_record.sequence_number); + + // ----------------------------------------------------------------------- + // 6. 
Deletion window protects recent records + // ----------------------------------------------------------------------- + println!("--- Deletion Window Enforcement ---"); + + let delete_attempt = pk_analyst + .trail(trail_id) + .records() + .delete(pk_record.sequence_number) + .build_and_execute(&pk_analyst) + .await; + + ensure!( + delete_attempt.is_err(), + "recent records must be protected by the count-based deletion window" + ); + println!( + "Record #{} is within the deletion window (newest 3) and cannot be deleted.\n", + pk_record.sequence_number + ); + + // ----------------------------------------------------------------------- + // 7. Monitor updates study phase metadata + // ----------------------------------------------------------------------- + println!("--- Metadata Update ---"); + + monitor + .trail(trail_id) + .update_metadata(Some("Phase: Data Review".to_string())) + .build_and_execute(&monitor) + .await?; + + let current_state = admin.trail(trail_id).get().await?; + println!("Study phase updated to: {:?}\n", current_state.updatable_metadata); + + // ----------------------------------------------------------------------- + // 8. Data Safety Board locks the study dataset + // ----------------------------------------------------------------------- + println!("--- Data Safety Board Lock ---"); + + // Lock writes until a specific future timestamp (e.g. 1 year from now), + // after which the dataset becomes permanently locked. + let lock_until_ms = now_ms + 365 * 24 * 60 * 60 * 1000; // 1 year from now + + data_safety_board + .trail(trail_id) + .locking() + .update_write_lock(TimeLock::UnlockAtMs(lock_until_ms)) + .build_and_execute(&data_safety_board) + .await?; + + let locked_trail = admin.trail(trail_id).get().await?; + println!( + "Write lock set to UnlockAtMs({}) — writes blocked until that timestamp.\n", + lock_until_ms + ); + println!("Current locking config: {:?}\n", locked_trail.locking_config); + + // Also lock the trail from deletion permanently. 
+    data_safety_board
+        .trail(trail_id)
+        .locking()
+        .update_delete_trail_lock(TimeLock::Infinite)
+        .build_and_execute(&data_safety_board)
+        .await?;
+
+    let final_locking = admin.trail(trail_id).get().await?;
+    println!(
+        "Delete-trail lock set to {:?} — trail cannot be deleted.\n",
+        final_locking.locking_config.delete_trail_lock
+    );
+
+    // -----------------------------------------------------------------------
+    // 9. Regulator read-only verification
+    // -----------------------------------------------------------------------
+    println!("--- Regulator Verification ---");
+
+    // In production the regulator would use AuditTrailClientReadOnly (no signer).
+    let regulator_handle = regulator.trail(trail_id);
+
+    let on_chain = regulator_handle.get().await?;
+    println!("Protocol: {:?}", on_chain.immutable_metadata);
+    println!("Phase: {:?}", on_chain.updatable_metadata);
+    println!("Roles: {:?}", on_chain.roles.roles.keys().collect::<Vec<_>>());
+    println!("Tags: {:?}", on_chain.tags.tag_map.keys().collect::<Vec<_>>());
+
+    let first_page = regulator_handle.records().list_page(None, 20).await?;
+    println!("\nVerified records ({} total):", first_page.records.len());
+    for (seq, record) in &first_page.records {
+        println!("  #{} | tag={:?} | {:?}", seq, record.tag, record.metadata);
+    }
+
+    // -----------------------------------------------------------------------
+    // 10. 
Assertions + // ----------------------------------------------------------------------- + ensure!( + first_page.records.len() == 5, + "expected 5 records (initial + enrolled + safety + efficacy + pk)" + ); + ensure!( + on_chain.tags.tag_map.contains_key("pk"), + "the 'pk' tag must exist after mid-study amendment" + ); + ensure!( + on_chain.locking_config.delete_record_window == LockingWindow::CountBased { count: 3 }, + "deletion window must remain count-based with count 3" + ); + ensure!( + on_chain.locking_config.delete_trail_lock == TimeLock::Infinite, + "delete-trail lock must be Infinite" + ); + ensure!( + matches!(on_chain.locking_config.write_lock, TimeLock::UnlockAtMs(_)), + "write lock must be UnlockAtMs" + ); + ensure!( + on_chain.updatable_metadata.as_deref() == Some("Phase: Data Review"), + "study phase must be 'Data Review'" + ); + + println!("\nClinical trial data-integrity verification completed successfully."); + + Ok(()) +} + +async fn issue_tagged_record_role( + client: &AuditTrailClient, + trail_id: ObjectID, + role_name: &str, + tag: &str, + issued_to: IotaAddress, +) -> Result<()> { + client + .trail(trail_id) + .access() + .for_role(role_name) + .create(PermissionSet::record_admin_permissions(), Some(RoleTags::new([tag]))) + .build_and_execute(client) + .await?; + + client + .trail(trail_id) + .access() + .for_role(role_name) + .issue_capability(CapabilityIssueOptions { + issued_to: Some(issued_to), + valid_from_ms: None, + valid_until_ms: None, + }) + .build_and_execute(client) + .await?; + + Ok(()) +} diff --git a/examples/audit-trail/run.sh b/examples/audit-trail/run.sh new file mode 100755 index 00000000..d6c68a03 --- /dev/null +++ b/examples/audit-trail/run.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Script to run all audit trail examples +# Usage: ./run.sh +# Make sure to set IOTA_AUDIT_TRAIL_PKG_ID and IOTA_TF_COMPONENTS_PKG_ID environment variables + +if [[ -z $IOTA_AUDIT_TRAIL_PKG_ID || -z $IOTA_TF_COMPONENTS_PKG_ID ]]; then + echo "Error: 
IOTA_AUDIT_TRAIL_PKG_ID and/or IOTA_TF_COMPONENTS_PKG_ID environment variables are not set"
+  echo "Usage: IOTA_AUDIT_TRAIL_PKG_ID=0x... IOTA_TF_COMPONENTS_PKG_ID=0x... ./run.sh"
+  echo ""
+  echo "On localnet, you can set both variables using:"
+  echo "  eval \$(./audit-trail-move/scripts/publish_package.sh)"
+  exit 1
+fi
+
+echo "Running all audit trail examples..."
+echo "AuditTrail Package ID: $IOTA_AUDIT_TRAIL_PKG_ID"
+echo "TfComponents Package ID: $IOTA_TF_COMPONENTS_PKG_ID"
+echo "================================"
+
+examples=(
+  "01_create_audit_trail"
+  "02_add_and_read_records"
+  "03_update_metadata"
+  "04_configure_locking"
+  "05_manage_access"
+  "06_delete_records"
+  "07_access_read_only_methods"
+  "08_delete_audit_trail"
+  "09_tagged_records"
+  "10_capability_constraints"
+  "11_manage_record_tags"
+  "01_customs_clearance"
+  "02_clinical_trial"
+)
+
+for example in "${examples[@]}"; do
+  echo ""
+  echo "Running Audit Trail: $example"
+  echo "------------------------"
+  cargo run --release --example "$example"
+  if [ $? -ne 0 ]; then
+    echo "Error: Failed to run $example"
+    exit 1
+  fi
+done
+
+echo ""
+echo "All Audit Trail examples completed successfully!"
diff --git a/examples/01_create_locked_notarization.rs b/examples/notarization/01_create_locked_notarization.rs similarity index 95% rename from examples/01_create_locked_notarization.rs rename to examples/notarization/01_create_locked_notarization.rs index e3280550..ab028978 100644 --- a/examples/01_create_locked_notarization.rs +++ b/examples/notarization/01_create_locked_notarization.rs @@ -4,7 +4,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use anyhow::Result; -use examples::get_funded_client; +use examples::get_funded_notarization_client; use notarization::core::types::{NotarizationMethod, OnChainNotarization, State, TimeLock}; use product_common::transaction::TransactionOutput; @@ -13,7 +13,7 @@ async fn main() -> Result<()> { println!("Creating a locked notarization example"); // Create a notarization client - let notarization_client = get_funded_client().await?; + let notarization_client = get_funded_notarization_client().await?; // Calculate unlock time (24 hours from now) let now_ts = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); diff --git a/examples/02_create_dynamic_notarization.rs b/examples/notarization/02_create_dynamic_notarization.rs similarity index 96% rename from examples/02_create_dynamic_notarization.rs rename to examples/notarization/02_create_dynamic_notarization.rs index 9bb608df..2c03fab2 100644 --- a/examples/02_create_dynamic_notarization.rs +++ b/examples/notarization/02_create_dynamic_notarization.rs @@ -4,7 +4,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use anyhow::Result; -use examples::get_funded_client; +use examples::get_funded_notarization_client; use notarization::core::types::{NotarizationMethod, OnChainNotarization, State, TimeLock}; use product_common::transaction::TransactionOutput; @@ -13,7 +13,7 @@ async fn main() -> Result<()> { println!("Creating a dynamic notarization example"); // Create a notarization client - let notarization_client = get_funded_client().await?; + let notarization_client = 
get_funded_notarization_client().await?; println!("Creating a simple dynamic notarization without locks..."); diff --git a/examples/03_update_dynamic_notarization.rs b/examples/notarization/03_update_dynamic_notarization.rs similarity index 95% rename from examples/03_update_dynamic_notarization.rs rename to examples/notarization/03_update_dynamic_notarization.rs index f7b9dc1d..47fe2792 100644 --- a/examples/03_update_dynamic_notarization.rs +++ b/examples/notarization/03_update_dynamic_notarization.rs @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::Result; -use examples::get_funded_client; +use examples::get_funded_notarization_client; use notarization::core::types::State; #[tokio::main] async fn main() -> Result<()> { println!("Demonstrating update on dynamic notarization"); - let notarization_client = get_funded_client().await?; + let notarization_client = get_funded_notarization_client().await?; println!("Creating a dynamic notarization..."); diff --git a/examples/04_destroy_notarization.rs b/examples/notarization/04_destroy_notarization.rs similarity index 98% rename from examples/04_destroy_notarization.rs rename to examples/notarization/04_destroy_notarization.rs index db404144..d023e63e 100644 --- a/examples/04_destroy_notarization.rs +++ b/examples/notarization/04_destroy_notarization.rs @@ -4,7 +4,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use anyhow::Result; -use examples::get_funded_client; +use examples::get_funded_notarization_client; use notarization::core::types::{State, TimeLock}; #[tokio::main] @@ -12,7 +12,7 @@ async fn main() -> Result<()> { println!("Demonstrating notarization destruction scenarios"); // Create a notarization client - let notarization_client = get_funded_client().await?; + let notarization_client = get_funded_notarization_client().await?; // Scenario 1: Destroy an unlocked dynamic notarization (should succeed) println!("📝 Scenario 1: Creating and destroying an unlocked dynamic notarization..."); diff 
--git a/examples/05_update_state.rs b/examples/notarization/05_update_state.rs similarity index 96% rename from examples/05_update_state.rs rename to examples/notarization/05_update_state.rs index 406eb5fd..731df426 100644 --- a/examples/05_update_state.rs +++ b/examples/notarization/05_update_state.rs @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::Result; -use examples::get_funded_client; +use examples::get_funded_notarization_client; use notarization::core::types::State; #[tokio::main] async fn main() -> Result<()> { println!("Demonstrating state updates on dynamic notarization"); - let notarization_client = get_funded_client().await?; + let notarization_client = get_funded_notarization_client().await?; println!("Creating a dynamic notarization for state updates..."); diff --git a/examples/06_update_metadata.rs b/examples/notarization/06_update_metadata.rs similarity index 97% rename from examples/06_update_metadata.rs rename to examples/notarization/06_update_metadata.rs index 8bbf47cc..1ed5984b 100644 --- a/examples/06_update_metadata.rs +++ b/examples/notarization/06_update_metadata.rs @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::Result; -use examples::get_funded_client; +use examples::get_funded_notarization_client; use notarization::core::types::State; #[tokio::main] async fn main() -> Result<()> { println!("Demonstrating metadata updates on dynamic notarization"); - let notarization_client = get_funded_client().await?; + let notarization_client = get_funded_notarization_client().await?; println!("Creating a dynamic notarization for metadata updates..."); diff --git a/examples/07_transfer_dynamic_notarization.rs b/examples/notarization/07_transfer_dynamic_notarization.rs similarity index 98% rename from examples/07_transfer_dynamic_notarization.rs rename to examples/notarization/07_transfer_dynamic_notarization.rs index b0473665..1d459d99 100644 --- a/examples/07_transfer_dynamic_notarization.rs +++ 
b/examples/notarization/07_transfer_dynamic_notarization.rs @@ -4,7 +4,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use anyhow::Result; -use examples::get_funded_client; +use examples::get_funded_notarization_client; use iota_sdk::types::base_types::IotaAddress; use notarization::core::types::{State, TimeLock}; @@ -12,7 +12,7 @@ use notarization::core::types::{State, TimeLock}; async fn main() -> Result<()> { println!("Demonstrating notarization transfer scenarios"); - let notarization_client = get_funded_client().await?; + let notarization_client = get_funded_notarization_client().await?; // Generate random addresses for transfer recipients let alice = IotaAddress::random_for_testing_only(); diff --git a/examples/08_access_read_only_methods.rs b/examples/notarization/08_access_read_only_methods.rs similarity index 98% rename from examples/08_access_read_only_methods.rs rename to examples/notarization/08_access_read_only_methods.rs index f42ad05c..a2d89a85 100644 --- a/examples/08_access_read_only_methods.rs +++ b/examples/notarization/08_access_read_only_methods.rs @@ -4,14 +4,14 @@ use std::time::{SystemTime, UNIX_EPOCH}; use anyhow::Result; -use examples::get_funded_client; +use examples::get_funded_notarization_client; use notarization::core::types::{State, TimeLock}; #[tokio::main] async fn main() -> Result<()> { println!("Demonstrating read-only methods for notarization inspection"); - let notarization_client = get_funded_client().await?; + let notarization_client = get_funded_notarization_client().await?; // Create a comprehensive dynamic notarization for testing println!("Creating a dynamic notarization with comprehensive metadata..."); diff --git a/examples/README.md b/examples/notarization/README.md similarity index 75% rename from examples/README.md rename to examples/notarization/README.md index 783c5af1..d1264fca 100644 --- a/examples/README.md +++ b/examples/notarization/README.md @@ -42,25 +42,25 @@ IOTA_NOTARIZATION_PKG_ID=0x... 
cargo run --release --example 01_create_locked_no The following basic CRUD (Create, Read, Update, Delete) examples are available: -| Name | Information | -| :------------------------------------------------------------------------------------------------------------------------------------ | :-------------------------------------------------------------------------------- | -| [01_create_locked_notarization](https://github.com/iotaledger/notarization/tree/main/examples/01_create_locked_notarization.rs) | Demonstrates how to create a locked notarization with delete locks. | -| [02_create_dynamic_notarization](https://github.com/iotaledger/notarization/tree/main/examples/02_create_dynamic_notarization.rs) | Demonstrates how to create dynamic notarizations with and without transfer locks. | -| [03_update_dynamic_notarization](https://github.com/iotaledger/notarization/tree/main/examples/03_update_dynamic_notarization.rs) | Demonstrates that dynamic notarizations can be updated | -| [04_destroy_notarization](https://github.com/iotaledger/notarization/tree/main/examples/04_destroy_notarization.rs) | Demonstrates notarization destruction scenarios based on lock types. | -| [05_update_state](https://github.com/iotaledger/notarization/tree/main/examples/05_update_state.rs) | Demonstrates state updates on dynamic notarizations including binary data. | -| [06_update_metadata](https://github.com/iotaledger/notarization/tree/main/examples/06_update_metadata.rs) | Demonstrates metadata updates and their behavior vs state updates. | -| [07_transfer_dynamic_notarization](https://github.com/iotaledger/notarization/tree/main/examples/07_transfer_dynamic_notarization.rs) | Demonstrates transfer scenarios for different notarization types and lock states. | -| [08_access_read_only_methods](https://github.com/iotaledger/notarization/tree/main/examples/08_access_read_only_methods.rs) | Comprehensive demonstration of all read-only inspection methods. 
| +| Name | Information | +| :------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------- | +| [01_create_locked_notarization](https://github.com/iotaledger/notarization/tree/main/examples/notarization/01_create_locked_notarization.rs) | Demonstrates how to create a locked notarization with delete locks. | +| [02_create_dynamic_notarization](https://github.com/iotaledger/notarization/tree/main/examples/notarization/02_create_dynamic_notarization.rs) | Demonstrates how to create dynamic notarizations with and without transfer locks. | +| [03_update_dynamic_notarization](https://github.com/iotaledger/notarization/tree/main/examples/notarization/03_update_dynamic_notarization.rs) | Demonstrates that dynamic notarizations can be updated | +| [04_destroy_notarization](https://github.com/iotaledger/notarization/tree/main/examples/notarization/04_destroy_notarization.rs) | Demonstrates notarization destruction scenarios based on lock types. | +| [05_update_state](https://github.com/iotaledger/notarization/tree/main/examples/notarization/05_update_state.rs) | Demonstrates state updates on dynamic notarizations including binary data. | +| [06_update_metadata](https://github.com/iotaledger/notarization/tree/main/examples/notarization/06_update_metadata.rs) | Demonstrates metadata updates and their behavior vs state updates. | +| [07_transfer_dynamic_notarization](https://github.com/iotaledger/notarization/tree/main/examples/notarization/07_transfer_dynamic_notarization.rs) | Demonstrates transfer scenarios for different notarization types and lock states. | +| [08_access_read_only_methods](https://github.com/iotaledger/notarization/tree/main/examples/notarization/08_access_read_only_methods.rs) | Comprehensive demonstration of all read-only inspection methods. 
| ## Real-World Examples The following examples demonstrate practical use cases with proper field usage: -| Name | Information | -| :--------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------- | -| [iot_weather_station](https://github.com/iotaledger/notarization/tree/main/examples/real-world/iot_weather_station.rs) | IoT weather station using dynamic notarization for continuous sensor data updates. | -| [legal_contract](https://github.com/iotaledger/notarization/tree/main/examples/real-world/legal_contract.rs) | Legal contract using locked notarization for immutable document hash attestation. | +| Name | Information | +| :---------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------- | +| [iot_weather_station](https://github.com/iotaledger/notarization/tree/main/examples/notarization/real-world/iot_weather_station.rs) | IoT weather station using dynamic notarization for continuous sensor data updates. | +| [legal_contract](https://github.com/iotaledger/notarization/tree/main/examples/notarization/real-world/legal_contract.rs) | Legal contract using locked notarization for immutable document hash attestation. 
| ## Notarization Types diff --git a/examples/real-world/01_iot_weather_station.rs b/examples/notarization/real-world/01_iot_weather_station.rs similarity index 98% rename from examples/real-world/01_iot_weather_station.rs rename to examples/notarization/real-world/01_iot_weather_station.rs index 8930c7ee..f4268019 100644 --- a/examples/real-world/01_iot_weather_station.rs +++ b/examples/notarization/real-world/01_iot_weather_station.rs @@ -19,7 +19,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use anyhow::Result; -use examples::get_funded_client; +use examples::get_funded_notarization_client; use notarization::core::types::State; use serde_json::json; @@ -28,7 +28,7 @@ async fn main() -> Result<()> { println!("🌡️ IoT Weather Station - Dynamic Notarization Example"); println!("=====================================================\n"); - let notarization_client = get_funded_client().await?; + let notarization_client = get_funded_notarization_client().await?; let now = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs(); diff --git a/examples/real-world/02_legal_contract.rs b/examples/notarization/real-world/02_legal_contract.rs similarity index 98% rename from examples/real-world/02_legal_contract.rs rename to examples/notarization/real-world/02_legal_contract.rs index 5678390a..ea7adad0 100644 --- a/examples/real-world/02_legal_contract.rs +++ b/examples/notarization/real-world/02_legal_contract.rs @@ -19,7 +19,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use anyhow::Result; -use examples::get_funded_client; +use examples::get_funded_notarization_client; use notarization::core::types::{State, TimeLock}; use serde_json::json; use sha2::{Digest, Sha256}; @@ -29,7 +29,7 @@ async fn main() -> Result<()> { println!("⚖️ Legal Contract - Locked Notarization Example"); println!("===============================================\n"); - let notarization_client = get_funded_client().await?; + let notarization_client = get_funded_notarization_client().await?; // Get current 
timestamp let now = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs(); diff --git a/examples/notarization/run.sh b/examples/notarization/run.sh new file mode 100755 index 00000000..c01565ee --- /dev/null +++ b/examples/notarization/run.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Script to run all notarization examples +# Usage: ./run.sh +# Make sure to set IOTA_NOTARIZATION_PKG_ID environment variable + +if [ -z "$IOTA_NOTARIZATION_PKG_ID" ]; then + echo "Error: IOTA_NOTARIZATION_PKG_ID environment variable is not set" + echo "Usage: IOTA_NOTARIZATION_PKG_ID=0x... ./run.sh" + exit 1 +fi + +echo "Running all Notarization examples..." +echo "Package ID: $IOTA_NOTARIZATION_PKG_ID" +echo "================================" + +examples=( + "01_create_locked_notarization" + "02_create_dynamic_notarization" + "03_update_dynamic_notarization" + "04_destroy_notarization" + "05_update_state" + "06_update_metadata" + "07_transfer_dynamic_notarization" + "08_access_read_only_methods" + "01_iot_weather_station" + "02_legal_contract" +) + +for example in "${examples[@]}"; do + echo "" + echo "Running Notarization Example: $example" + echo "------------------------" + cargo run --release --example "$example" + if [ $? -ne 0 ]; then + echo "Error: Failed to run $example" + exit 1 + fi +done + +echo "" +echo "All Notarization examples completed successfully!" diff --git a/examples/run.sh b/examples/run.sh index 56ff40ef..861ea809 100755 --- a/examples/run.sh +++ b/examples/run.sh @@ -1,42 +1,13 @@ #!/bin/bash -# Script to run all notarization examples +# Script to run all examples contained in this directory # Usage: ./run.sh -# Make sure to set IOTA_NOTARIZATION_PKG_ID environment variable - -if [ -z "$IOTA_NOTARIZATION_PKG_ID" ]; then - echo "Error: IOTA_NOTARIZATION_PKG_ID environment variable is not set" - echo "Usage: IOTA_NOTARIZATION_PKG_ID=0x... ./run.sh" - exit 1 -fi - -echo "Running all notarization examples..." 
-echo "Package ID: $IOTA_NOTARIZATION_PKG_ID" -echo "================================" - -examples=( - "01_create_locked_notarization" - "02_create_dynamic_notarization" - "03_update_dynamic_notarization" - "04_destroy_notarization" - "05_update_state" - "06_update_metadata" - "07_transfer_dynamic_notarization" - "08_access_read_only_methods" - "01_iot_weather_station" - "02_legal_contract" -) - -for example in "${examples[@]}"; do - echo "" - echo "Running: $example" - echo "------------------------" - cargo run --release --example "$example" - if [ $? -ne 0 ]; then - echo "Error: Failed to run $example" - exit 1 - fi -done - -echo "" -echo "All examples completed successfully!" +# Make sure to set the following environment variables: +# - IOTA_NOTARIZATION_PKG_ID: The package ID of the notarization module +# - IOTA_AUDIT_TRAIL_PKG_ID: The package ID of the audit trail module +# - IOTA_TF_COMPONENTS_PKG_ID: The package ID of the tf components module + +./examples/audit-trail/run.sh +printf "\n================================\n" +printf "================================\n\n" +./examples/notarization/run.sh diff --git a/examples/utils/utils.rs b/examples/utils/utils.rs index 5f918f9c..a3af6aec 100644 --- a/examples/utils/utils.rs +++ b/examples/utils/utils.rs @@ -1,38 +1,76 @@ -// Copyright 2020-2025 IOTA Stiftung +// Copyright 2020-2026 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 use anyhow::Context; +use audit_trail::{AuditTrailClient, PackageOverrides}; +use iota_sdk::types::base_types::ObjectID; use iota_sdk::{IOTA_LOCAL_NETWORK_URL, IotaClientBuilder}; use notarization::client::{NotarizationClient, NotarizationClientReadOnly}; use product_common::test_utils::{InMemSigner, request_funds}; -pub async fn get_read_only_client() -> anyhow::Result { +async fn get_iota_client() -> anyhow::Result { let api_endpoint = std::env::var("API_ENDPOINT").unwrap_or_else(|_| IOTA_LOCAL_NETWORK_URL.to_string()); - let iota_client = IotaClientBuilder::default() + 
IotaClientBuilder::default() .build(&api_endpoint) .await - .map_err(|err| anyhow::anyhow!(format!("failed to connect to network; {}", err)))?; + .map_err(|err| anyhow::anyhow!("failed to connect to network; {}", err)) +} + +fn get_package_id_from_env(env_var_name: &str) -> anyhow::Result { + let value = std::env::var(env_var_name) + .with_context(|| format!("env variable '{env_var_name}' must be set in order to run the examples"))?; + + value + .parse() + .with_context(|| format!("invalid package id in {env_var_name}")) +} - let package_id = std::env::var("IOTA_NOTARIZATION_PKG_ID") - .map_err(|e| { - anyhow::anyhow!("env variable IOTA_NOTARIZATION_PKG_ID must be set in order to run the examples").context(e) - }) - .and_then(|pkg_str| pkg_str.parse().context("invalid package id"))?; +pub async fn get_notarization_read_only_client() -> anyhow::Result { + let iota_client = get_iota_client().await?; + + let package_id = get_package_id_from_env("IOTA_NOTARIZATION_PKG_ID")?; NotarizationClientReadOnly::new_with_pkg_id(iota_client, package_id) .await .context("failed to create a read-only NotarizationClient") } -pub async fn get_funded_client() -> Result, anyhow::Error> { +pub async fn get_funded_notarization_client() -> Result, anyhow::Error> { let signer = InMemSigner::new(); let sender_address = signer.get_address().await?; request_funds(&sender_address).await?; - let read_only_client = get_read_only_client().await?; + let read_only_client = get_notarization_read_only_client().await?; let notarization_client: NotarizationClient = NotarizationClient::new(read_only_client, signer).await?; Ok(notarization_client) } + +pub async fn get_funded_audit_trail_client() -> Result, anyhow::Error> { + let iota_client = get_iota_client().await?; + + let audit_trail_pkg_id = get_package_id_from_env("IOTA_AUDIT_TRAIL_PKG_ID")?; + + let tf_components_pkg_id = get_package_id_from_env("IOTA_TF_COMPONENTS_PKG_ID")?; + + let signer = InMemSigner::new(); + let sender_address = 
signer.get_address().await?; + request_funds(&sender_address).await?; + + let client = AuditTrailClient::from_iota_client( + iota_client, + Some(PackageOverrides { + audit_trail: Some(audit_trail_pkg_id), + tf_component: Some(tf_components_pkg_id), + }), + ) + .await + .map_err(|e| anyhow::anyhow!("failed to create AuditTrailClient: {e}"))?; + + client + .with_signer(signer) + .await + .map_err(|e| anyhow::anyhow!("failed to attach signer to AuditTrailClient: {e}")) +} diff --git a/notarization-move/Move.history.json b/notarization-move/Move.history.json index b1b9d6aa..30d7c448 100644 --- a/notarization-move/Move.history.json +++ b/notarization-move/Move.history.json @@ -1,18 +1,18 @@ { "aliases": { - "mainnet": "6364aad5", + "devnet": "daf90477", "testnet": "2304aa97", - "devnet": "daf90477" + "mainnet": "6364aad5" }, "envs": { - "6364aad5": [ - "0x909ce9dcd9a5e97b7b8884fac8e018fad9dece348bf73837379b8694ff684cf3" + "daf90477": [ + "0x72c8433b88e6bdee0eb02a257fdebd0ec2b6c990043f35b155cb4c5cf727fdca" ], "2304aa97": [ "0x00412bd469b7f980227c6c574090348239852e43aa07818b315854fdd8a2d25f" ], - "daf90477": [ - "0x72c8433b88e6bdee0eb02a257fdebd0ec2b6c990043f35b155cb4c5cf727fdca" + "6364aad5": [ + "0x909ce9dcd9a5e97b7b8884fac8e018fad9dece348bf73837379b8694ff684cf3" ] } } \ No newline at end of file diff --git a/notarization-move/Move.lock b/notarization-move/Move.lock index 3b99649c..a0489408 100644 --- a/notarization-move/Move.lock +++ b/notarization-move/Move.lock @@ -2,18 +2,19 @@ [move] version = 3 -manifest_digest = "2E3FF0C8C2529AC5F5521920800D3385F4B722FF03E524F5AF757E81CA710024" -deps_digest = "F9B494B64F0615AED0E98FC12A85B85ECD2BC5185C22D30E7F67786BB52E507C" +manifest_digest = "E8F9EAB938F4F4898CB27E88DD059EEA0544D15A08AC9AFC6A0E81D4F3030DAC" +deps_digest = "397E6A9F7A624706DBDFEE056CE88391A15876868FD18A88504DA74EB458D697" dependencies = [ { id = "Iota", name = "Iota" }, { id = "IotaSystem", name = "IotaSystem" }, { id = "MoveStdlib", name = "MoveStdlib" }, { id 
= "Stardust", name = "Stardust" }, + { id = "TfComponents", name = "TfComponents" }, ] [[move.package]] id = "Iota" -source = { git = "https://github.com/iotaledger/iota.git", rev = "431ab686e6f1d4abd83b16dd7c671712002ecac8", subdir = "crates/iota-framework/packages/iota-framework" } +source = { git = "https://github.com/iotaledger/iota.git", rev = "e694e2ee8f2f9f0b9b03b843a24ff0f7bcff2930", subdir = "crates/iota-framework/packages/iota-framework" } dependencies = [ { id = "MoveStdlib", name = "MoveStdlib" }, @@ -21,7 +22,7 @@ dependencies = [ [[move.package]] id = "IotaSystem" -source = { git = "https://github.com/iotaledger/iota.git", rev = "431ab686e6f1d4abd83b16dd7c671712002ecac8", subdir = "crates/iota-framework/packages/iota-system" } +source = { git = "https://github.com/iotaledger/iota.git", rev = "e694e2ee8f2f9f0b9b03b843a24ff0f7bcff2930", subdir = "crates/iota-framework/packages/iota-system" } dependencies = [ { id = "Iota", name = "Iota" }, @@ -30,28 +31,39 @@ dependencies = [ [[move.package]] id = "MoveStdlib" -source = { git = "https://github.com/iotaledger/iota.git", rev = "431ab686e6f1d4abd83b16dd7c671712002ecac8", subdir = "crates/iota-framework/packages/move-stdlib" } +source = { git = "https://github.com/iotaledger/iota.git", rev = "e694e2ee8f2f9f0b9b03b843a24ff0f7bcff2930", subdir = "crates/iota-framework/packages/move-stdlib" } [[move.package]] id = "Stardust" -source = { git = "https://github.com/iotaledger/iota.git", rev = "431ab686e6f1d4abd83b16dd7c671712002ecac8", subdir = "crates/iota-framework/packages/stardust" } +source = { git = "https://github.com/iotaledger/iota.git", rev = "e694e2ee8f2f9f0b9b03b843a24ff0f7bcff2930", subdir = "crates/iota-framework/packages/stardust" } dependencies = [ { id = "Iota", name = "Iota" }, { id = "MoveStdlib", name = "MoveStdlib" }, ] +[[move.package]] +id = "TfComponents" +source = { git = "https://github.com/iotaledger/product-core.git", rev = "main", subdir = "components_move" } + +dependencies = [ + { 
id = "Iota", name = "Iota" }, + { id = "IotaSystem", name = "IotaSystem" }, + { id = "MoveStdlib", name = "MoveStdlib" }, + { id = "Stardust", name = "Stardust" }, +] + [move.toolchain-version] -compiler-version = "1.18.0-beta" +compiler-version = "1.16.2-rc" edition = "2024.beta" flavor = "iota" [env] [env.localnet] -chain-id = "ecc0606a" -original-published-id = "0xfbddb4631d027b2c4f0b4b90c020713d258ed32bdb342b5397f4da71edb7478a" -latest-published-id = "0xfbddb4631d027b2c4f0b4b90c020713d258ed32bdb342b5397f4da71edb7478a" +chain-id = "4991e514" +original-published-id = "0x8d9e2e2f04101e66c778cfeef09a9fdc945b172cb9f550ace1d36b23ac536735" +latest-published-id = "0x8d9e2e2f04101e66c778cfeef09a9fdc945b172cb9f550ace1d36b23ac536735" published-version = "1" [env.devnet] diff --git a/notarization-move/Move.toml b/notarization-move/Move.toml index 93671c18..4726d096 100644 --- a/notarization-move/Move.toml +++ b/notarization-move/Move.toml @@ -6,6 +6,7 @@ name = "IotaNotarization" edition = "2024.beta" [dependencies] +TfComponents = { git = "https://github.com/iotaledger/product-core.git", subdir = "components_move", rev = "main" } [addresses] iota_notarization = "0x0" diff --git a/notarization-move/README.md b/notarization-move/README.md new file mode 100644 index 00000000..28e54da6 --- /dev/null +++ b/notarization-move/README.md @@ -0,0 +1,86 @@ +![banner](https://github.com/iotaledger/notarization/raw/HEAD/.github/banner_notarization.png) + +

+ StackExchange + Discord + Apache 2.0 license +

+ +

+ Introduction ◈ + Modules ◈ + Development & Testing ◈ + Related Libraries ◈ + Contributing +

+ +--- + +# IOTA Notarization Move Package + +## Introduction + +`notarization-move` is the on-chain Move package behind IOTA Notarization. + +It defines the core `Notarization` object and the supporting modules for: + +- dynamic notarization flows +- locked notarization flows +- immutable creation metadata +- optional updatable metadata +- state updates, transfer rules, and destruction checks +- emitted events for notarization lifecycle changes + +The package depends on `TfComponents` for shared timelock primitives. + +## Modules + +- `iota_notarization::notarization` + Core object, state model, metadata, lock metadata, updates, and destruction logic. +- `iota_notarization::dynamic_notarization` + Dynamic notarization creation and transfer flows. +- `iota_notarization::locked_notarization` + Locked notarization creation flows with timelock controls. +- `iota_notarization::method` + Method discriminator helpers for dynamic and locked variants. + +## Development And Testing + +Build the Move package: + +```bash +cd notarization-move +iota move build +``` + +Run the Move test suite: + +```bash +cd notarization-move +iota move test +``` + +Publish locally: + +```bash +cd notarization-move +./scripts/publish_package.sh +``` + +The package history files [`Move.lock`](./Move.lock) and [`Move.history.json`](./Move.history.json) are used by the Rust SDK to resolve and track deployed package versions. + +## Related Libraries + +- [Rust SDK](https://github.com/iotaledger/notarization/tree/main/notarization-rs/README.md) +- [Wasm SDK](https://github.com/iotaledger/notarization/tree/main/bindings/wasm/notarization_wasm/README.md) +- [Repository Root](https://github.com/iotaledger/notarization/tree/main/README.md) + +## Contributing + +We would love to have you help us with the development of IOTA Notarization. Each and every contribution is greatly valued. 
+ +Please review the [contribution](https://docs.iota.org/developer/iota-notarization/contribute) sections in the [IOTA Docs Portal](https://docs.iota.org/developer/iota-notarization/). + +To contribute directly to the repository, simply fork the project, push your changes to your fork and create a pull request to get them included. + +The best place to get involved in discussions about this package or to look for support is the `#notarization` channel on the [IOTA Discord](https://discord.gg/iota-builders). You can also ask questions on our [Stack Exchange](https://iota.stackexchange.com/). diff --git a/notarization-move/sources/dynamic_notarization.move b/notarization-move/sources/dynamic_notarization.move index ca5e7950..b9edcfae 100644 --- a/notarization-move/sources/dynamic_notarization.move +++ b/notarization-move/sources/dynamic_notarization.move @@ -5,8 +5,9 @@ module iota_notarization::dynamic_notarization; use iota::{clock::Clock, event}; -use iota_notarization::{notarization, timelock::TimeLock}; +use iota_notarization::notarization; use std::string::String; +use tf_components::timelock::TimeLock; // ===== Constants ===== /// Cannot transfer a locked notarization diff --git a/notarization-move/sources/locked_notarization.move b/notarization-move/sources/locked_notarization.move index f1c02fa0..e843e9c9 100644 --- a/notarization-move/sources/locked_notarization.move +++ b/notarization-move/sources/locked_notarization.move @@ -5,8 +5,9 @@ module iota_notarization::locked_notarization; use iota::{clock::Clock, event}; -use iota_notarization::{notarization, timelock::TimeLock}; +use iota_notarization::notarization; use std::string::String; +use tf_components::timelock::TimeLock; /// Event emitted when a locked notarization is created public struct LockedNotarizationCreated has copy, drop { diff --git a/notarization-move/sources/notarization.move b/notarization-move/sources/notarization.move index 7ea207f7..6ff8e4fa 100644 ---
a/notarization-move/sources/notarization.move +++ b/notarization-move/sources/notarization.move @@ -7,11 +7,9 @@ module iota_notarization::notarization; use iota::{clock::{Self, Clock}, event}; -use iota_notarization::{ - method::{NotarizationMethod, new_dynamic, new_locked}, - timelock::{Self, TimeLock} -}; +use iota_notarization::method::{NotarizationMethod, new_dynamic, new_locked}; use std::string::String; +use tf_components::timelock::{Self, TimeLock}; // ===== Constants ===== /// Cannot update state while notarization is locked for updates diff --git a/notarization-move/sources/timelock.move b/notarization-move/sources/timelock.move deleted file mode 100644 index e287228d..00000000 --- a/notarization-move/sources/timelock.move +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2025 IOTA Stiftung -// SPDX-License-Identifier: Apache-2.0 - -/// # Timelock Unlock Condition Module -/// -/// This module implements a timelock mechanism that restricts access to resources -/// until a specified time has passed. It provides functionality to create and validate -/// different types of time-based locks: -/// -/// - Simple time locks that unlock at a specific Unix timestamp -/// - UntilDestroyed lock that never unlocks until the notarization is destroyed -/// - None lock that is not locked -module iota_notarization::timelock; - -use iota::clock::{Self, Clock}; - -// ===== Errors ===== -/// Error when attempting to create a timelock with a timestamp in the past -const EPastTimestamp: u64 = 0; -/// Error when attempting to destroy a timelock that is still locked -const ETimelockNotExpired: u64 = 1; - -/// Represents different types of time-based locks that can be applied to -/// notarizations. 
-public enum TimeLock has store { - /// A lock that unlocks at a specific Unix timestamp (seconds since epoch) - UnlockAt(u32), - /// A permanent lock that never unlocks until the notarization object is destroyed (can't be used for `delete_lock`) - UntilDestroyed, - /// No lock applied - None, -} - -/// Creates a new time lock that unlocks at a specific Unix timestamp. -public fun unlock_at(unix_time: u32, clock: &Clock): TimeLock { - let now = (clock::timestamp_ms(clock) / 1000) as u32; - - assert!(is_valid_period(unix_time, now), EPastTimestamp); - - TimeLock::UnlockAt(unix_time) -} - -/// Creates a new UntilDestroyed lock that never unlocks until the notarization object is destroyed. -public fun until_destroyed(): TimeLock { - TimeLock::UntilDestroyed -} - -/// Create a new lock that is not locked. -public fun none(): TimeLock { - TimeLock::None -} - -/// Checks if the provided lock time is an UntilDestroyed lock. -public fun is_until_destroyed(lock_time: &TimeLock): bool { - match (lock_time) { - TimeLock::UntilDestroyed => true, - _ => false, - } -} - -/// Checks if the provided lock time is a UnlockAt lock. -public fun is_unlock_at(lock_time: &TimeLock): bool { - match (lock_time) { - TimeLock::UnlockAt(_) => true, - _ => false, - } -} - -/// Checks if the provided lock time is a None lock. -public fun is_none(lock_time: &TimeLock): bool { - match (lock_time) { - TimeLock::None => true, - _ => false, - } -} - -/// Gets the unlock time from a TimeLock if it is a UnixTime lock. -public fun get_unlock_time(lock_time: &TimeLock): Option { - match (lock_time) { - TimeLock::UnlockAt(time) => option::some(*time), - _ => option::none(), - } -} - -/// Destroys a TimeLock if it's either unlocked or an UntilDestroyed lock. 
-public fun destroy(condition: TimeLock, clock: &Clock) { - // The TimeLock is always destroyed, except of those cases where an assertion is raised - match (condition) { - TimeLock::UnlockAt(time) => { - assert!(!(time > ((clock::timestamp_ms(clock) / 1000) as u32)), ETimelockNotExpired); - }, - TimeLock::UntilDestroyed => {}, - TimeLock::None => {}, - } -} - -/// Checks if a timelock condition is currently active (locked). -/// -/// This function evaluates whether a given TimeLock instance is currently in a locked state -/// by comparing the current time with the lock's parameters. A lock is considered active if: -/// 1. For UnixTime locks: The current time hasn't reached the specified unlock time yet -/// 2. For UntilDestroyed: Always returns true as these locks never unlock until the notarization is destroyed -/// 3. For None: Always returns false as there is no lock -public fun is_timelocked(condition: &TimeLock, clock: &Clock): bool { - match (condition) { - TimeLock::UnlockAt(unix_time) => { - *unix_time > ((clock::timestamp_ms(clock) / 1000) as u32) - }, - TimeLock::UntilDestroyed => true, - TimeLock::None => false, - } -} - -/// Check if a timelock condition is `UnlockAt` -public fun is_timelocked_unlock_at(lock_time: &TimeLock, clock: &Clock): bool { - match (lock_time) { - TimeLock::UnlockAt(time) => { - *time > ((clock::timestamp_ms(clock) / 1000) as u32) - }, - _ => false, - } -} - -/// Validates that a specified unlock time is in the future. 
-public fun is_valid_period(unix_time: u32, current_time: u32): bool { - unix_time > current_time -} diff --git a/notarization-move/tests/dynamic_notarization_tests.move b/notarization-move/tests/dynamic_notarization_tests.move index 6c929873..0077bbde 100644 --- a/notarization-move/tests/dynamic_notarization_tests.move +++ b/notarization-move/tests/dynamic_notarization_tests.move @@ -6,8 +6,9 @@ module iota_notarization::dynamic_notarization_tests; use iota::{clock, test_scenario::{Self as ts, ctx}}; -use iota_notarization::{dynamic_notarization, notarization, timelock}; +use iota_notarization::{dynamic_notarization, notarization}; use std::string; +use tf_components::timelock; const ADMIN_ADDRESS: address = @0x01; const RECIPIENT_ADDRESS: address = @0x02; diff --git a/notarization-move/tests/locked_notarization_tests.move b/notarization-move/tests/locked_notarization_tests.move index 43a05780..8f04965d 100644 --- a/notarization-move/tests/locked_notarization_tests.move +++ b/notarization-move/tests/locked_notarization_tests.move @@ -6,8 +6,9 @@ module iota_notarization::locked_notarization_tests; use iota::{clock, test_scenario as ts}; -use iota_notarization::{locked_notarization, notarization, timelock}; +use iota_notarization::{locked_notarization, notarization}; use std::string; +use tf_components::timelock; const ADMIN_ADDRESS: address = @0x1; diff --git a/notarization-move/tests/notarization_tests.move b/notarization-move/tests/notarization_tests.move index 17e37167..dc317470 100644 --- a/notarization-move/tests/notarization_tests.move +++ b/notarization-move/tests/notarization_tests.move @@ -6,8 +6,9 @@ module iota_notarization::notarization_tests; use iota::{clock, test_scenario as ts}; -use iota_notarization::{notarization, timelock}; +use iota_notarization::notarization; use std::string; +use tf_components::timelock; const ADMIN_ADDRESS: address = @0x1; diff --git a/notarization-move/tests/timelock_tests.move b/notarization-move/tests/timelock_tests.move 
deleted file mode 100644 index 68aa0f13..00000000 --- a/notarization-move/tests/timelock_tests.move +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (c) 2024 IOTA Stiftung -// SPDX-License-Identifier: Apache-2.0 - -/// This module provides tests for the timelock module -#[test_only] -module iota_notarization::timelock_tests; - -use iota::{clock, test_scenario::{Self as ts, ctx}}; -use iota_notarization::timelock; - -const ADMIN_ADDRESS: address = @0x01; - -#[test] -public fun test_new_unlock_at() { - let mut ts = ts::begin(ADMIN_ADDRESS); - - let ctx = ts.ctx(); - - let mut clock = clock::create_for_testing(ctx); - clock::set_for_testing(&mut clock, 1000000); - - let lock = timelock::unlock_at(1001, &clock); - - assert!(timelock::is_unlock_at(&lock)); - assert!(timelock::get_unlock_time(&lock) == std::option::some(1001)); - assert!(timelock::is_timelocked(&lock, &clock)); - - // Advance time by setting a new timestamp - clock::increment_for_testing(&mut clock, 1000); - - assert!(!timelock::is_timelocked(&lock, &clock)); - - timelock::destroy(lock, &clock); - clock::destroy_for_testing(clock); - - ts.end(); -} - -#[test] -#[expected_failure(abort_code = timelock::EPastTimestamp)] -public fun test_new_unlock_at_past_time() { - let mut ts = ts::begin(ADMIN_ADDRESS); - let ctx = ts.ctx(); - - let mut clock = clock::create_for_testing(ctx); - clock::set_for_testing(&mut clock, 1000000); - - // Try to create a timelock with a timestamp in the past - let lock = timelock::unlock_at(999, &clock); - - // This should never be reached - timelock::destroy(lock, &clock); - clock::destroy_for_testing(clock); - - ts.end(); -} - -#[test] -public fun test_until_destroyed() { - let mut ts = ts::begin(ADMIN_ADDRESS); - let ctx = ts.ctx(); - - let mut clock = clock::create_for_testing(ctx); - clock::set_for_testing(&mut clock, 1000000); - - let lock = timelock::until_destroyed(); - - assert!(timelock::is_until_destroyed(&lock)); - assert!(!timelock::is_unlock_at(&lock)); - 
assert!(timelock::get_unlock_time(&lock) == std::option::none()); - - // UntilDestroyed is always timelocked - assert!(timelock::is_timelocked(&lock, &clock)); - - // Even after a long time - clock::increment_for_testing(&mut clock, 1000000); - assert!(timelock::is_timelocked(&lock, &clock)); - - // UntilDestroyed can always be destroyed without error - timelock::destroy(lock, &clock); - clock::destroy_for_testing(clock); - - ts.end(); -} - -#[test] -public fun test_none_lock() { - let mut ts = ts::begin(ADMIN_ADDRESS); - let ctx = ts.ctx(); - - let mut clock = clock::create_for_testing(ctx); - clock::set_for_testing(&mut clock, 1000000); - - let lock = timelock::none(); - - assert!(!timelock::is_until_destroyed(&lock)); - assert!(!timelock::is_unlock_at(&lock)); - assert!(timelock::get_unlock_time(&lock) == std::option::none()); - - // None is never timelocked - assert!(!timelock::is_timelocked(&lock, &clock)); - - // None can always be destroyed without error - timelock::destroy(lock, &clock); - clock::destroy_for_testing(clock); - - ts.end(); -} - -#[test] -#[expected_failure(abort_code = timelock::ETimelockNotExpired)] -public fun test_destroy_locked_timelock() { - let mut ts = ts::begin(ADMIN_ADDRESS); - let ctx = ts.ctx(); - - let mut clock = clock::create_for_testing(ctx); - clock::set_for_testing(&mut clock, 1000000); - - // Create a timelock that unlocks at time 2000 - let lock = timelock::unlock_at(2000, &clock); - - // Try to destroy it before it's unlocked - // This should fail with ETimelockNotExpired - timelock::destroy(lock, &clock); - - // These should never be reached - clock::destroy_for_testing(clock); - ts.end(); -} - -#[test] -public fun test_is_timelocked_unlock_at() { - let mut ts = ts::begin(ADMIN_ADDRESS); - let ctx = ts.ctx(); - - let mut clock = clock::create_for_testing(ctx); - clock::set_for_testing(&mut clock, 1000000); - - // Create different types of locks - let unlock_at_lock = timelock::unlock_at(2000, &clock); - let 
until_destroyed_lock = timelock::until_destroyed(); - let none_lock = timelock::none(); - - // Test is_timelocked_unlock_at - assert!(timelock::is_timelocked_unlock_at(&unlock_at_lock, &clock)); - assert!(!timelock::is_timelocked_unlock_at(&until_destroyed_lock, &clock)); - assert!(!timelock::is_timelocked_unlock_at(&none_lock, &clock)); - - // Advance time past unlock time - clock::increment_for_testing(&mut clock, 1000000); - - // Now the unlock_at lock should not be timelocked - assert!(!timelock::is_timelocked_unlock_at(&unlock_at_lock, &clock)); - - // Clean up - timelock::destroy(unlock_at_lock, &clock); - timelock::destroy(until_destroyed_lock, &clock); - timelock::destroy(none_lock, &clock); - clock::destroy_for_testing(clock); - - ts.end(); -} - -#[test] -public fun test_is_valid_period() { - // Test valid periods - assert!(timelock::is_valid_period(1001, 1000)); - assert!(timelock::is_valid_period(2000, 1000)); - - // Test invalid periods - assert!(!timelock::is_valid_period(1000, 1000)); // Equal time - assert!(!timelock::is_valid_period(999, 1000)); // Past time -} - -#[test] -public fun test_edge_cases() { - let mut ts = ts::begin(ADMIN_ADDRESS); - let ctx = ts.ctx(); - - let mut clock = clock::create_for_testing(ctx); - clock::set_for_testing(&mut clock, 1000000); - - // Test with time just one second in the future - let one_second_future = timelock::unlock_at(1001, &clock); - assert!(timelock::is_timelocked(&one_second_future, &clock)); - clock::set_for_testing(&mut clock, 1001000); - assert!(!timelock::is_timelocked(&one_second_future, &clock)); - - // Test with time exactly at the current time boundary - clock::set_for_testing(&mut clock, 2000000); - let exact_current_time = timelock::unlock_at(2001, &clock); - assert!(timelock::is_timelocked(&exact_current_time, &clock)); - clock::set_for_testing(&mut clock, 2001000); - assert!(!timelock::is_timelocked(&exact_current_time, &clock)); - - // Clean up - timelock::destroy(one_second_future, &clock); - 
timelock::destroy(exact_current_time, &clock); - clock::destroy_for_testing(clock); - - ts.end(); -} diff --git a/notarization-rs/Cargo.toml b/notarization-rs/Cargo.toml index d6653103..f21d7618 100644 --- a/notarization-rs/Cargo.toml +++ b/notarization-rs/Cargo.toml @@ -26,7 +26,6 @@ thiserror.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dependencies] iota_interaction_rust = { workspace = true, default-features = false } -hyper = { workspace = true } iota-sdk = { workspace = true } tokio = { workspace = true } diff --git a/notarization-rs/README.md b/notarization-rs/README.md index e7faf0a9..75be16ba 100644 --- a/notarization-rs/README.md +++ b/notarization-rs/README.md @@ -6,37 +6,6 @@ instance, which is mapped to the Notarization object on the ledger and can be us You can find the full IOTA Notarization documentation [here](https://docs.iota.org/developer/iota-notarization). -Following Notarization methods are currently provided: - -- Dynamic Notarization -- Locked Notarization - -These Notarization methods are implemented using a single Notarization Move object, stored on the IOTA Ledger. -The Method specific behavior is achieved via configuration of this object. - -To minimize the need for config settings, the Notarization methods reduce the number of available configuration -parameters while using method specific fixed settings for several parameters, resulting in the typical method -specific behaviour. Here, Notarization methods can be seen as prepared configuration sets to facilitate -Notarization usage for often needed use cases. 
- -Here is an overview of the most important configuration parameters for each of these methods: - -| Method | Locking exists | delete_lock* | update_lock | transfer_lock | -| ------- | --------------- | ---------------- | ----------------------- | ----------------------- | -| Dynamic | Optional [conf] | None [static] | None [static] | Optional [conf] | -| Locked | Yes [static] | Optional* [conf] | UntilDestroyed [static] | UntilDestroyed [static] | - -Explanation of terms and symbols for the table above: - -- [conf]: Configurable parameter. -- [static]: Fixed or static parameter. -- Optional: - - Locks: The lock can be set to UnlockAt or UntilDestroyed. - - Locking exists: If no locking is used, there will be no [`LockMetadata`] stored with the Notarization object - Otherwise [`LockMetadata`] will be created automatically. If no [`LockMetadata`] exist, the behaviour is - equivalent to existing [`LockMetadata`] with all locks set to [`None`]. - - *: delete_lock can not be set to `UntilDestroyed`. - ## Process Flows The following workflows demonstrate how NotarizationBuilder and Notarization instances can be used to create, update and diff --git a/notarization-rs/src/core/types/timelock.rs b/notarization-rs/src/core/types/timelock.rs index 07beba36..2c4ca477 100644 --- a/notarization-rs/src/core/types/timelock.rs +++ b/notarization-rs/src/core/types/timelock.rs @@ -8,12 +8,6 @@ //! ## Overview //! //! The time-based locks are used to restrict the access to a notarization. -//! -//! ## Types -//! -//! - `UnlockAt`: The lock is unlocked at a specific time. -//! - `UntilDestroyed`: The lock is locked until the notarization is destroyed. -//! - `None`: The lock is not applied. use std::str::FromStr; use std::time::SystemTime; @@ -40,20 +34,25 @@ pub struct LockMetadata { /// notarizations. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub enum TimeLock { - /// A lock that is unlocked at a specific time. 
+ /// A lock that unlocks at a specific Unix timestamp (seconds since Unix epoch) UnlockAt(u32), - /// A lock that is unlocked when the notarization is destroyed. + /// Same as UnlockAt (unlocks at specific timestamp) but using milliseconds since Unix epoch + UnlockAtMs(u64), + /// A permanent lock that never unlocks until the locked object is destroyed (can't be used for `delete_lock`) UntilDestroyed, + /// A lock that never unlocks (permanent lock) + Infinite, + /// No lock applied None, } impl TimeLock { - /// Creates a new `TimeLock` with a specified unlock time.\ + /// Creates a new `TimeLock::UnlockAt` with a specified unlock time.\ /// /// The unlock time is the time in seconds since the Unix epoch and /// must be in the future. - pub fn new_with_ts(unlock_time: u32) -> Result { - if unlock_time + pub fn new_with_ts(unlock_time_sec: u32) -> Result { + if unlock_time_sec <= SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .expect("system time is before the Unix epoch") @@ -62,7 +61,24 @@ impl TimeLock { return Err(Error::InvalidArgument("unlock time must be in the future".to_string())); } - Ok(TimeLock::UnlockAt(unlock_time)) + Ok(TimeLock::UnlockAt(unlock_time_sec)) + } + + /// Creates a new `TimeLock::UnlockAtMs` with a specified unlock time.\ + /// + /// The unlock time is the time in milliseconds since the Unix epoch and + /// must be in the future. + pub fn new_with_ts_ms(unlock_time_ms: u64) -> Result { + if unlock_time_ms + <= SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("system time is before the Unix epoch") + .as_millis() as u64 + { + return Err(Error::InvalidArgument("unlock time must be in the future".to_string())); + } + + Ok(TimeLock::UnlockAtMs(unlock_time_ms)) } /// Creates a new `Argument` from the `TimeLock`. 
@@ -71,23 +87,39 @@ impl TimeLock { pub(in crate::core) fn to_ptb(&self, ptb: &mut Ptb, package_id: ObjectID) -> Result { match self { TimeLock::UnlockAt(unlock_time) => new_unlock_at(ptb, *unlock_time, package_id), + TimeLock::UnlockAtMs(unlock_time) => new_unlock_at_ms(ptb, *unlock_time, package_id), TimeLock::UntilDestroyed => new_until_destroyed(ptb, package_id), + TimeLock::Infinite => new_infinite(ptb, package_id), TimeLock::None => new_none(ptb, package_id), } } } /// Creates a new `Argument` for the `unlock_at` function. -pub(super) fn new_unlock_at(ptb: &mut Ptb, unlock_time: u32, package_id: ObjectID) -> Result { +pub(super) fn new_unlock_at(ptb: &mut Ptb, unlock_time_sec: u32, package_id: ObjectID) -> Result { let clock = move_utils::get_clock_ref(ptb); - let unlock_time = move_utils::ptb_pure(ptb, "unlock_time", unlock_time)?; + let unlock_time_sec = move_utils::ptb_pure(ptb, "unlock_time", unlock_time_sec)?; Ok(ptb.programmable_move_call( package_id, ident_str!("timelock").into(), ident_str!("unlock_at").into(), vec![], - vec![unlock_time, clock], + vec![unlock_time_sec, clock], )) } + +/// Creates a new `Argument` for the `unlock_at_ms` function. NOTE(review): the move call below still targets `unlock_at` — confirm the intended Move entry point. +pub(super) fn new_unlock_at_ms(ptb: &mut Ptb, unlock_time_ms: u64, package_id: ObjectID) -> Result { + let clock = move_utils::get_clock_ref(ptb); + let unlock_time_ms = move_utils::ptb_pure(ptb, "unlock_time", unlock_time_ms)?; + + Ok(ptb.programmable_move_call( + package_id, + ident_str!("timelock").into(), + ident_str!("unlock_at").into(), + vec![], + vec![unlock_time_ms, clock], )) } @@ -102,6 +134,17 @@ pub(super) fn new_until_destroyed(ptb: &mut Ptb, package_id: ObjectID) -> Result )) } +/// Creates a new `Argument` for the `infinite` function.
+pub(super) fn new_infinite(ptb: &mut Ptb, package_id: ObjectID) -> Result { + Ok(ptb.programmable_move_call( + package_id, + ident_str!("timelock").into(), + ident_str!("infinite").into(), + vec![], + vec![], + )) +} + /// Creates a new `Argument` for the `none` function. pub(super) fn new_none(ptb: &mut Ptb, package_id: ObjectID) -> Result { Ok(ptb.programmable_move_call( diff --git a/notarization-rs/src/lib.rs b/notarization-rs/src/lib.rs index 611fae81..25512ddd 100644 --- a/notarization-rs/src/lib.rs +++ b/notarization-rs/src/lib.rs @@ -1,6 +1,8 @@ // Copyright 2020-2025 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 +#![doc = include_str!("../README.md")] + pub mod client; pub mod core; pub mod error;