diff --git a/.dockerignore b/.dockerignore index 05c2a8e3..a9a358a3 100644 --- a/.dockerignore +++ b/.dockerignore @@ -5,6 +5,7 @@ !.git !docker/healthcheck.sh !docker/start.sh +!macros !migrations !src diff --git a/.env.template b/.env.template index 43748f48..23d4fc29 100644 --- a/.env.template +++ b/.env.template @@ -351,6 +351,7 @@ ## - "browser-fileless-import": Directly import credentials from other providers without a file. ## - "extension-refresh": Temporarily enable the new extension design until general availability (should be used with the beta Chrome extension) ## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor. +## - "inline-menu-positioning-improvements": Enable the use of inline menu password generator and identity suggestions in the browser extension. ## - "ssh-key-vault-item": Enable the creation and use of SSH key vault items. (Needs clients >=2024.12.0) ## - "ssh-agent": Enable SSH agent support on Desktop. (Needs desktop >=2024.12.0) # EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials @@ -411,6 +412,14 @@ ## Multiple values must be separated with a whitespace. # ALLOWED_IFRAME_ANCESTORS= +## Allowed connect-src (Know the risks!) +## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src +## Allows other domains for URLs which can be loaded using script interfaces, like the Forwarded email alias feature +## This adds the configured value to the 'Content-Security-Policy' header's 'connect-src' value. +## Multiple values must be separated with a whitespace. And only HTTPS values are allowed. +## Example: "https://my-addy-io.domain.tld https://my-simplelogin.domain.tld" +# ALLOWED_CONNECT_SRC="" + ## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in. # LOGIN_RATELIMIT_SECONDS=60 ## Allow a burst of requests of up to this size, while maintaining the average indicated by `LOGIN_RATELIMIT_SECONDS`. 
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a025041f..86e5213f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,4 +1,5 @@ name: Build +permissions: {} on: push: @@ -13,6 +14,7 @@ on: - "diesel.toml" - "docker/Dockerfile.j2" - "docker/DockerSettings.yaml" + pull_request: paths: - ".github/workflows/build.yml" @@ -28,13 +30,17 @@ on: jobs: build: + name: Build and Test ${{ matrix.channel }} + permissions: + actions: write + contents: read # We use Ubuntu 22.04 here because this matches the library versions used within the Debian docker containers runs-on: ubuntu-22.04 timeout-minutes: 120 # Make warnings errors, this is to prevent warnings slipping through. # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes. env: - RUSTFLAGS: "-D warnings" + RUSTFLAGS: "-Dwarnings" strategy: fail-fast: false matrix: @@ -42,20 +48,19 @@ jobs: - "rust-toolchain" # The version defined in rust-toolchain - "msrv" # The supported MSRV - name: Build and Test ${{ matrix.channel }} - steps: - # Checkout the repo - - name: "Checkout" - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1 - # End Checkout the repo - - # Install dependencies - name: "Install dependencies Ubuntu" run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config # End Install dependencies + # Checkout the repo + - name: "Checkout" + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2 + with: + persist-credentials: false + fetch-depth: 0 + # End Checkout the repo # Determine rust-toolchain version - name: Init Variables @@ -75,7 +80,7 @@ jobs: # Only install the clippy and rustfmt components on the default rust-toolchain - name: "Install rust-toolchain version" - uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a # master @ Aug 8, 2024, 7:36 PM GMT+2 + uses: 
dtolnay/rust-toolchain@c5a29ddb4d9d194e7c84ec8c3fba61b1c31fee8c # master @ Jan 30, 2025, 8:16 PM GMT+1 if: ${{ matrix.channel == 'rust-toolchain' }} with: toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}" @@ -85,7 +90,7 @@ jobs: # Install the any other channel to be used for which we do not execute clippy and rustfmt - name: "Install MSRV version" - uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a # master @ Aug 8, 2024, 7:36 PM GMT+2 + uses: dtolnay/rust-toolchain@c5a29ddb4d9d194e7c84ec8c3fba61b1c31fee8c # master @ Jan 30, 2025, 8:16 PM GMT+1 if: ${{ matrix.channel != 'rust-toolchain' }} with: toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}" @@ -93,11 +98,13 @@ jobs: # Set the current matrix toolchain version as default - name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default" + env: + RUST_TOOLCHAIN: ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} run: | # Remove the rust-toolchain.toml rm rust-toolchain.toml # Set the default - rustup default ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} + rustup default "${RUST_TOOLCHAIN}" # Show environment - name: "Show environment" @@ -107,7 +114,8 @@ jobs: # End Show environment # Enable Rust Caching - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + - name: Rust Caching + uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7 with: # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes. # Like changing the build host from Ubuntu 20.04 to 22.04 for example. @@ -117,33 +125,39 @@ jobs: # Run cargo tests # First test all features together, afterwards test them separately. 
+ - name: "test features: sqlite,mysql,postgresql,enable_mimalloc,query_logger" + id: test_sqlite_mysql_postgresql_mimalloc_logger + if: ${{ !cancelled() }} + run: | + cargo test --features sqlite,mysql,postgresql,enable_mimalloc,query_logger + - name: "test features: sqlite,mysql,postgresql,enable_mimalloc" id: test_sqlite_mysql_postgresql_mimalloc - if: $${{ always() }} + if: ${{ !cancelled() }} run: | cargo test --features sqlite,mysql,postgresql,enable_mimalloc - name: "test features: sqlite,mysql,postgresql" id: test_sqlite_mysql_postgresql - if: $${{ always() }} + if: ${{ !cancelled() }} run: | cargo test --features sqlite,mysql,postgresql - name: "test features: sqlite" id: test_sqlite - if: $${{ always() }} + if: ${{ !cancelled() }} run: | cargo test --features sqlite - name: "test features: mysql" id: test_mysql - if: $${{ always() }} + if: ${{ !cancelled() }} run: | cargo test --features mysql - name: "test features: postgresql" id: test_postgresql - if: $${{ always() }} + if: ${{ !cancelled() }} run: | cargo test --features postgresql # End Run cargo tests @@ -152,16 +166,16 @@ jobs: # Run cargo clippy, and fail on warnings - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc" id: clippy - if: ${{ always() && matrix.channel == 'rust-toolchain' }} + if: ${{ !cancelled() && matrix.channel == 'rust-toolchain' }} run: | - cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings + cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc # End Run cargo clippy # Run cargo fmt (Only run on rust-toolchain defined version) - name: "check formatting" id: formatting - if: ${{ always() && matrix.channel == 'rust-toolchain' }} + if: ${{ !cancelled() && matrix.channel == 'rust-toolchain' }} run: | cargo fmt --all -- --check # End Run cargo fmt @@ -171,21 +185,31 @@ jobs: # This is useful so all test/clippy/fmt actions are done, and they can all be addressed - name: "Some checks failed" if: ${{ failure() }} + env: + 
TEST_DB_M_L: ${{ steps.test_sqlite_mysql_postgresql_mimalloc_logger.outcome }} + TEST_DB_M: ${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }} + TEST_DB: ${{ steps.test_sqlite_mysql_postgresql.outcome }} + TEST_SQLITE: ${{ steps.test_sqlite.outcome }} + TEST_MYSQL: ${{ steps.test_mysql.outcome }} + TEST_POSTGRESQL: ${{ steps.test_postgresql.outcome }} + CLIPPY: ${{ steps.clippy.outcome }} + FMT: ${{ steps.formatting.outcome }} run: | - echo "### :x: Checks Failed!" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "|Job|Status|" >> $GITHUB_STEP_SUMMARY - echo "|---|------|" >> $GITHUB_STEP_SUMMARY - echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|test (sqlite,mysql,postgresql)|${{ steps.test_sqlite_mysql_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|test (sqlite)|${{ steps.test_sqlite.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|test (mysql)|${{ steps.test_mysql.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|test (postgresql)|${{ steps.test_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.clippy.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|fmt|${{ steps.formatting.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "Please check the failed jobs and fix where needed." >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY + echo "### :x: Checks Failed!" 
>> "${GITHUB_STEP_SUMMARY}" + echo "" >> "${GITHUB_STEP_SUMMARY}" + echo "|Job|Status|" >> "${GITHUB_STEP_SUMMARY}" + echo "|---|------|" >> "${GITHUB_STEP_SUMMARY}" + echo "|test (sqlite,mysql,postgresql,enable_mimalloc,query_logger)|${TEST_DB_M_L}|" >> "${GITHUB_STEP_SUMMARY}" + echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${TEST_DB_M}|" >> "${GITHUB_STEP_SUMMARY}" + echo "|test (sqlite,mysql,postgresql)|${TEST_DB}|" >> "${GITHUB_STEP_SUMMARY}" + echo "|test (sqlite)|${TEST_SQLITE}|" >> "${GITHUB_STEP_SUMMARY}" + echo "|test (mysql)|${TEST_MYSQL}|" >> "${GITHUB_STEP_SUMMARY}" + echo "|test (postgresql)|${TEST_POSTGRESQL}|" >> "${GITHUB_STEP_SUMMARY}" + echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${CLIPPY}|" >> "${GITHUB_STEP_SUMMARY}" + echo "|fmt|${FMT}|" >> "${GITHUB_STEP_SUMMARY}" + echo "" >> "${GITHUB_STEP_SUMMARY}" + echo "Please check the failed jobs and fix where needed." >> "${GITHUB_STEP_SUMMARY}" + echo "" >> "${GITHUB_STEP_SUMMARY}" exit 1 @@ -194,5 +218,5 @@ jobs: - name: "All checks passed" if: ${{ success() }} run: | - echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY + echo "### :tada: Checks Passed!" 
>> "${GITHUB_STEP_SUMMARY}" + echo "" >> "${GITHUB_STEP_SUMMARY}" diff --git a/.github/workflows/hadolint.yml b/.github/workflows/hadolint.yml index a671f936..240d6dbf 100644 --- a/.github/workflows/hadolint.yml +++ b/.github/workflows/hadolint.yml @@ -1,24 +1,20 @@ name: Hadolint +permissions: {} -on: [ - push, - pull_request - ] +on: [ push, pull_request ] jobs: hadolint: name: Validate Dockerfile syntax + permissions: + contents: read runs-on: ubuntu-24.04 timeout-minutes: 30 - steps: - # Checkout the repo - - name: Checkout - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1 - # End Checkout the repo + steps: # Start Docker Buildx - name: Setup Docker Buildx - uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 + uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0 # https://github.com/moby/buildkit/issues/3969 # Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills with: @@ -37,6 +33,12 @@ jobs: env: HADOLINT_VERSION: 2.12.0 # End Download hadolint + # Checkout the repo + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2 + with: + persist-credentials: false + # End Checkout the repo # Test Dockerfiles with hadolint - name: Run hadolint diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 22fc4e28..d155c159 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,5 @@ name: Release +permissions: {} on: push: @@ -6,17 +7,23 @@ on: - main tags: - - '*' + # https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#filter-pattern-cheat-sheet + - '[1-2].[0-9]+.[0-9]+' jobs: # https://github.com/marketplace/actions/skip-duplicate-actions # Some checks to determine if we need to continue with building a new docker. 
# We will skip this check if we are creating a tag, because that has the same hash as a previous run already. skip_check: - runs-on: ubuntu-24.04 + # Only run this in the upstream repo and not on forks if: ${{ github.repository == 'dani-garcia/vaultwarden' }} + name: Cancel older jobs when running + permissions: + actions: write + runs-on: ubuntu-24.04 outputs: should_skip: ${{ steps.skip_check.outputs.should_skip }} + steps: - name: Skip Duplicates Actions id: skip_check @@ -27,11 +34,17 @@ jobs: if: ${{ github.ref_type == 'branch' }} docker-build: - runs-on: ubuntu-24.04 - timeout-minutes: 120 needs: skip_check if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }} - # Start a local docker registry to extract the final Alpine static build binaries + name: Build Vaultwarden containers + permissions: + packages: write + contents: read + attestations: write + id-token: write + runs-on: ubuntu-24.04 + timeout-minutes: 120 + # Start a local docker registry to extract the compiled binaries to upload as artifacts and attest them services: registry: image: registry:2 @@ -56,37 +69,42 @@ jobs: base_image: ["debian","alpine"] steps: - # Checkout the repo - - name: Checkout - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1 - with: - fetch-depth: 0 - - name: Initialize QEMU binfmt support - uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 + uses: docker/setup-qemu-action@53851d14592bedcffcf25ea515637cff71ef929a # v3.3.0 with: platforms: "arm64,arm" # Start Docker Buildx - name: Setup Docker Buildx - uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 + uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0 # https://github.com/moby/buildkit/issues/3969 # Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills with: + cache-binary: false buildkitd-config-inline: | [worker.oci] 
max-parallelism = 2 driver-opts: | network=host + # Checkout the repo + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2 + # We need fetch-depth of 0 so we also get all the tag metadata + with: + persist-credentials: false + fetch-depth: 0 + # Determine Base Tags and Source Version - name: Determine Base Tags and Source Version shell: bash + env: + REF_TYPE: ${{ github.ref_type }} run: | - # Check which main tag we are going to build determined by github.ref_type - if [[ "${{ github.ref_type }}" == "tag" ]]; then + # Check which main tag we are going to build determined by ref_type + if [[ "${REF_TYPE}" == "tag" ]]; then echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}" - elif [[ "${{ github.ref_type }}" == "branch" ]]; then + elif [[ "${REF_TYPE}" == "branch" ]]; then echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}" fi @@ -111,8 +129,10 @@ jobs: - name: Add registry for DockerHub if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }} shell: bash + env: + DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }} run: | - echo "CONTAINER_REGISTRIES=${{ vars.DOCKERHUB_REPO }}" | tee -a "${GITHUB_ENV}" + echo "CONTAINER_REGISTRIES=${DOCKERHUB_REPO}" | tee -a "${GITHUB_ENV}" # Login to GitHub Container Registry - name: Login to GitHub Container Registry @@ -126,8 +146,10 @@ jobs: - name: Add registry for ghcr.io if: ${{ env.HAVE_GHCR_LOGIN == 'true' }} shell: bash + env: + GHCR_REPO: ${{ vars.GHCR_REPO }} run: | - echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}" + echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${GHCR_REPO}" | tee -a "${GITHUB_ENV}" # Login to Quay.io - name: Login to Quay.io @@ -141,17 +163,22 @@ jobs: - name: Add registry for Quay.io if: ${{ env.HAVE_QUAY_LOGIN == 'true' }} shell: bash + env: + QUAY_REPO: ${{ vars.QUAY_REPO }} run: | - echo 
"CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}" + echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${QUAY_REPO}" | tee -a "${GITHUB_ENV}" - name: Configure build cache from/to shell: bash + env: + GHCR_REPO: ${{ vars.GHCR_REPO }} + BASE_IMAGE: ${{ matrix.base_image }} run: | # # Check if there is a GitHub Container Registry Login and use it for caching if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then - echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}" - echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}" + echo "BAKE_CACHE_FROM=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}" | tee -a "${GITHUB_ENV}" + echo "BAKE_CACHE_TO=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}" else echo "BAKE_CACHE_FROM=" echo "BAKE_CACHE_TO=" @@ -159,13 +186,13 @@ jobs: # - name: Add localhost registry - if: ${{ matrix.base_image == 'alpine' }} shell: bash run: | echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}" - name: Bake ${{ matrix.base_image }} containers - uses: docker/bake-action@2e3d19baedb14545e5d41222653874f25d5b4dfb # v5.10.0 + id: bake_vw + uses: docker/bake-action@7bff531c65a5cda33e52e43950a795b91d450f63 # v6.3.0 env: BASE_TAGS: "${{ env.BASE_TAGS }}" SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}" @@ -175,78 +202,119 @@ jobs: with: pull: true push: true + source: . 
files: docker/docker-bake.hcl targets: "${{ matrix.base_image }}-multi" set: | *.cache-from=${{ env.BAKE_CACHE_FROM }} *.cache-to=${{ env.BAKE_CACHE_TO }} + - name: Extract digest SHA + shell: bash + env: + BAKE_METADATA: ${{ steps.bake_vw.outputs.metadata }} + run: | + GET_DIGEST_SHA="$(jq -r '.["${{ matrix.base_image }}-multi"]."containerimage.digest"' <<< "${BAKE_METADATA}")" + echo "DIGEST_SHA=${GET_DIGEST_SHA}" | tee -a "${GITHUB_ENV}" + + # Attest container images + - name: Attest - docker.io - ${{ matrix.base_image }} + if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}} + uses: actions/attest-build-provenance@520d128f165991a6c774bcb264f323e3d70747f4 # v2.2.0 + with: + subject-name: ${{ vars.DOCKERHUB_REPO }} + subject-digest: ${{ env.DIGEST_SHA }} + push-to-registry: true + + - name: Attest - ghcr.io - ${{ matrix.base_image }} + if: ${{ env.HAVE_GHCR_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}} + uses: actions/attest-build-provenance@520d128f165991a6c774bcb264f323e3d70747f4 # v2.2.0 + with: + subject-name: ${{ vars.GHCR_REPO }} + subject-digest: ${{ env.DIGEST_SHA }} + push-to-registry: true + + - name: Attest - quay.io - ${{ matrix.base_image }} + if: ${{ env.HAVE_QUAY_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}} + uses: actions/attest-build-provenance@520d128f165991a6c774bcb264f323e3d70747f4 # v2.2.0 + with: + subject-name: ${{ vars.QUAY_REPO }} + subject-digest: ${{ env.DIGEST_SHA }} + push-to-registry: true + # Extract the Alpine binaries from the containers - name: Extract binaries - if: ${{ matrix.base_image == 'alpine' }} shell: bash + env: + REF_TYPE: ${{ github.ref_type }} run: | - # Check which main tag we are going to build determined by github.ref_type - if [[ "${{ github.ref_type }}" == "tag" ]]; then + # Check which main tag we are going to build determined by ref_type + if [[ "${REF_TYPE}" == "tag" ]]; then EXTRACT_TAG="latest" - elif [[ "${{ github.ref_type }}" == "branch" ]]; then + 
elif [[ "${REF_TYPE}" == "branch" ]]; then EXTRACT_TAG="testing" fi + # Check which base_image was used and append -alpine if needed + if [[ "${{ matrix.base_image }}" == "alpine" ]]; then + EXTRACT_TAG="${EXTRACT_TAG}-alpine" + fi + # After each extraction the image is removed. # This is needed because using different platforms doesn't trigger a new pull/download # Extract amd64 binary - docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine" - docker cp amd64:/vaultwarden vaultwarden-amd64 + docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" + docker cp amd64:/vaultwarden vaultwarden-amd64-${{ matrix.base_image }} docker rm --force amd64 - docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine" + docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" # Extract arm64 binary - docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine" - docker cp arm64:/vaultwarden vaultwarden-arm64 + docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" + docker cp arm64:/vaultwarden vaultwarden-arm64-${{ matrix.base_image }} docker rm --force arm64 - docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine" + docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" # Extract armv7 binary - docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine" - docker cp armv7:/vaultwarden vaultwarden-armv7 + docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" + docker cp armv7:/vaultwarden vaultwarden-armv7-${{ matrix.base_image }} docker rm --force armv7 - docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine" + docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" # Extract armv6 binary - 
docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine" - docker cp armv6:/vaultwarden vaultwarden-armv6 + docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" + docker cp armv6:/vaultwarden vaultwarden-armv6-${{ matrix.base_image }} docker rm --force armv6 - docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine" + docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" - # Upload artifacts to Github Actions - - name: "Upload amd64 artifact" - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 - if: ${{ matrix.base_image == 'alpine' }} + # Upload artifacts to Github Actions and Attest the binaries + - name: "Upload amd64 artifact ${{ matrix.base_image }}" + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0 with: - name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64 - path: vaultwarden-amd64 + name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64-${{ matrix.base_image }} + path: vaultwarden-amd64-${{ matrix.base_image }} - - name: "Upload arm64 artifact" - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 - if: ${{ matrix.base_image == 'alpine' }} + - name: "Upload arm64 artifact ${{ matrix.base_image }}" + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0 with: - name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64 - path: vaultwarden-arm64 + name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64-${{ matrix.base_image }} + path: vaultwarden-arm64-${{ matrix.base_image }} - - name: "Upload armv7 artifact" - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 - if: ${{ matrix.base_image == 'alpine' }} + - name: "Upload armv7 artifact ${{ matrix.base_image }}" + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0 with: - name: vaultwarden-${{ 
env.SOURCE_VERSION }}-linux-armv7 - path: vaultwarden-armv7 + name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7-${{ matrix.base_image }} + path: vaultwarden-armv7-${{ matrix.base_image }} - - name: "Upload armv6 artifact" - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 - if: ${{ matrix.base_image == 'alpine' }} + - name: "Upload armv6 artifact ${{ matrix.base_image }}" + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0 with: - name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6 - path: vaultwarden-armv6 + name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6-${{ matrix.base_image }} + path: vaultwarden-armv6-${{ matrix.base_image }} + + - name: "Attest artifacts ${{ matrix.base_image }}" + uses: actions/attest-build-provenance@520d128f165991a6c774bcb264f323e3d70747f4 # v2.2.0 + with: + subject-path: vaultwarden-* # End Upload artifacts to Github Actions diff --git a/.github/workflows/releasecache-cleanup.yml b/.github/workflows/releasecache-cleanup.yml index 6fd880bb..f62fccd3 100644 --- a/.github/workflows/releasecache-cleanup.yml +++ b/.github/workflows/releasecache-cleanup.yml @@ -1,3 +1,6 @@ +name: Cleanup +permissions: {} + on: workflow_dispatch: inputs: @@ -9,10 +12,11 @@ on: schedule: - cron: '0 1 * * FRI' -name: Cleanup jobs: releasecache-cleanup: name: Releasecache Cleanup + permissions: + packages: write runs-on: ubuntu-24.04 continue-on-error: true timeout-minutes: 30 diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 48a8bc1e..6cba5df4 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -1,37 +1,45 @@ -name: trivy +name: Trivy +permissions: {} on: push: branches: - main + tags: - '*' + pull_request: - branches: [ "main" ] + branches: + - main + schedule: - cron: '08 11 * * *' -permissions: - contents: read - jobs: trivy-scan: - # Only run this in the master repo and not on forks + # Only run this in the upstream repo and not on 
forks # When all forks run this at the same time, it is causing `Too Many Requests` issues if: ${{ github.repository == 'dani-garcia/vaultwarden' }} - name: Check - runs-on: ubuntu-24.04 - timeout-minutes: 30 + name: Trivy Scan permissions: contents: read - security-events: write actions: read + security-events: write + runs-on: ubuntu-24.04 + timeout-minutes: 30 + steps: - name: Checkout code - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2 + with: + persist-credentials: false - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@5681af892cd0f4997658e2bacc62bd0a894cf564 # v0.27.0 + uses: aquasecurity/trivy-action@18f2510ee396bbf400402947b394f2dd8c87dbb0 # v0.29.0 + env: + TRIVY_DB_REPOSITORY: docker.io/aquasec/trivy-db:2,public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2 + TRIVY_JAVA_DB_REPOSITORY: docker.io/aquasec/trivy-java-db:1,public.ecr.aws/aquasecurity/trivy-java-db:1,ghcr.io/aquasecurity/trivy-java-db:1 with: scan-type: repo ignore-unfixed: true @@ -40,6 +48,6 @@ jobs: severity: CRITICAL,HIGH - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.26.6 + uses: github/codeql-action/upload-sarif@86b04fb0e47484f7282357688f21d5d0e32175fe # v3.27.5 with: sarif_file: 'trivy-results.sarif' diff --git a/Cargo.lock b/Cargo.lock index 9edd20bf..77eabf5b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "addr2line" @@ -26,7 +26,7 @@ dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -55,9 +55,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.19" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611cc2ae7d2e242c457e4be7f97036b8ad9ca152b499f53faf99b1ed8fc2553f" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "android-tzdata" @@ -111,9 +111,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857" +checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" dependencies = [ "brotli", "flate2", @@ -176,7 +176,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "event-listener-strategy", "pin-project-lite", ] @@ -194,7 +194,7 @@ dependencies = [ "async-task", "blocking", "cfg-if", - "event-listener 5.3.1", + "event-listener 5.4.0", "futures-lite", "rustix", "tracing", @@ -275,9 +275,9 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" dependencies = [ "proc-macro2", "quote", @@ -352,9 +352,9 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bigdecimal" -version = "0.4.6" +version = "0.4.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f850665a0385e070b64c38d2354e6c104c8479c59868d1e48a0c13ee2c7a1c1" +checksum = "7f31f3af01c5c65a07985c804d3366560e6fa7883d640a122819b14ec327482c" dependencies = [ "autocfg", "libm", @@ -371,15 +371,9 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bitflags" -version = "1.3.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "blake2" @@ -425,9 +419,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.1" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -435,15 +429,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.16.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytemuck" -version = "1.19.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" [[package]] name = "byteorder" @@ -453,9 +447,9 @@ checksum = 
"1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "cached" @@ -470,7 +464,7 @@ dependencies = [ "futures", "hashbrown 0.14.5", "once_cell", - "thiserror", + "thiserror 1.0.69", "tokio", "web-time", ] @@ -495,9 +489,9 @@ checksum = "ade8366b8bd5ba243f0a58f036cc0ca8a2f069cff1a2351ef1cac6b083e16fc0" [[package]] name = "cc" -version = "1.1.37" +version = "1.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" +checksum = "e4730490333d58093109dc02c23174c3f4d490998c3fed3cc8e82d57afedb9cf" dependencies = [ "shlex", ] @@ -510,9 +504,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", @@ -523,9 +517,9 @@ dependencies = [ [[package]] name = "chrono-tz" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6dd8046d00723a59a2f8c5f295c515b9bb9a331ee4f8f3d4dd49e428acd3b6" +checksum = "9c6ac4f2c0bf0f44e9161aec9675e1050aa4a530663c4a9e37e108fa948bca9f" dependencies = [ "chrono", "chrono-tz-build", @@ -586,7 +580,7 @@ checksum = "2eac901828f88a5241ee0600950ab981148a18f2f756900ffba1b125ca6a3ef9" dependencies = [ "cookie", "document-features", - "idna 1.0.3", + "idna", "log", "publicsuffix", "serde", @@ -614,9 +608,9 @@ checksum = 
"773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] @@ -643,9 +637,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crypto-common" @@ -708,9 +702,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" [[package]] name = "data-url" @@ -727,6 +721,58 @@ dependencies = [ "powerfmt", ] +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn", +] + +[[package]] +name = 
"derive_more" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71158d5e914dec8a242751a3fc516b03ed3e6772ce9de79e1aeea6420663cad4" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e04e066e440d7973a852a3acdc25b0ae712bb6d311755fbf773d6a4518b2226" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + [[package]] name = "devise" version = "0.4.2" @@ -753,7 +799,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b035a542cf7abf01f2e3c4d5a7acbaebfefe120ae4efc7bde3df98186e4b8af7" dependencies = [ - "bitflags 2.6.0", + "bitflags", "proc-macro2", "proc-macro2-diagnostics", "quote", @@ -762,12 +808,12 @@ dependencies = [ [[package]] name = "diesel" -version = "2.2.4" +version = "2.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "158fe8e2e68695bd615d7e4f3227c0727b151330d3e253b525086c348d055d5e" +checksum = "04001f23ba8843dc315804fa324000376084dfb1c30794ff68dd279e6e5696d5" dependencies = [ "bigdecimal", - "bitflags 2.6.0", + "bitflags", "byteorder", "chrono", "diesel_derives", @@ -784,6 +830,17 @@ dependencies = [ "url", ] +[[package]] +name = "diesel-derive-newtype" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5adf688c584fe33726ce0e2898f608a2a92578ac94a4a92fcecf73214fe0716" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "diesel_derives" version = "2.2.3" @@ -799,9 +856,9 @@ dependencies = [ [[package]] name = "diesel_logger" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23010b507517129dc9b11fb35f36d76fd2d3dd4c85232733697622e345375f2f" +checksum = "8074833fffb675cf22a6ee669124f65f02971e48dd520bb80c7473ff70aeaf95" dependencies = [ "diesel", 
"log", @@ -866,9 +923,9 @@ checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" [[package]] name = "dsl_auto_type" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d9abe6314103864cc2d8901b7ae224e0ab1a103a0a416661b4097b0779b607" +checksum = "139ae9aca7527f85f26dd76483eb38533fd84bd571065da1739656ef71c5ff5b" dependencies = [ "darling", "either", @@ -886,9 +943,9 @@ checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "email-encoding" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60d1d33cdaede7e24091f039632eb5d3c7469fe5b066a985281a34fc70fa317f" +checksum = "ea3d894bbbab314476b265f9b2d46bf24b123a36dd0e96b06a1b49545b9d9dcc" dependencies = [ "base64 0.22.1", "memchr", @@ -924,6 +981,12 @@ dependencies = [ "syn", ] +[[package]] +name = "env_home" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe" + [[package]] name = "equivalent" version = "1.0.1" @@ -932,21 +995,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "error-chain" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "version_check", + "windows-sys 0.59.0", ] [[package]] @@ -957,9 +1011,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" 
[[package]] name = "event-listener" -version = "5.3.1" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" dependencies = [ "concurrent-queue", "parking", @@ -968,25 +1022,25 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "pin-project-lite", ] [[package]] name = "fastrand" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fern" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69ff9c9d5fb3e6da8ac2f77ab76fe7e8087d512ce095200f8f29ac5b656cf6dc" +checksum = "4316185f709b23713e41e3195f90edef7fb00c3ed4adc79769cf09cc762a3b29" dependencies = [ "libc", "log", @@ -1010,9 +1064,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.34" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "miniz_oxide", @@ -1098,9 +1152,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.5.0" +version = "2.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" dependencies = [ "fastrand", "futures-core", @@ -1188,10 +1242,22 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] + [[package]] name = "gimli" version = "0.31.1" @@ -1200,9 +1266,9 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "gloo-timers" @@ -1218,9 +1284,9 @@ dependencies = [ [[package]] name = "governor" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0746aa765db78b521451ef74221663b57ba595bf83f75d0ce23cc09447c8139f" +checksum = "842dc78579ce01e6a1576ad896edc92fca002dd60c9c3746b7fc2bec6fb429d0" dependencies = [ "cfg-if", "dashmap", @@ -1232,7 +1298,7 @@ dependencies = [ "parking_lot", "portable-atomic", "quanta", - "rand", + "rand 0.8.5", "smallvec", "spinning_top", ] @@ -1252,35 +1318,16 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.26" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - 
"futures-util", - "http 0.2.12", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "h2" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.1.0", + "http 1.2.0", "indexmap", "slab", "tokio", @@ -1296,17 +1343,18 @@ checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" [[package]] name = "handlebars" -version = "6.2.0" +version = "6.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd4ccde012831f9a071a637b0d4e31df31c0f6c525784b35ae76a9ac6bc1e315" +checksum = "3d6b224b95c1e668ac0270325ad563b2eef1469fbbb8959bc7c692c844b813d9" dependencies = [ + "derive_builder", "log", "num-order", "pest", "pest_derive", "serde", "serde_json", - "thiserror", + "thiserror 2.0.11", "walkdir", ] @@ -1322,9 +1370,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.1" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" [[package]] name = "heck" @@ -1346,9 +1394,9 @@ checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hickory-proto" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" dependencies = [ "async-trait", "cfg-if", @@ -1357,11 +1405,11 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna 0.4.0", + "idna", "ipnet", "once_cell", - 
"rand", - "thiserror", + "rand 0.8.5", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -1370,9 +1418,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" dependencies = [ "cfg-if", "futures-util", @@ -1381,10 +1429,10 @@ dependencies = [ "lru-cache", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -1398,15 +1446,6 @@ dependencies = [ "digest", ] -[[package]] -name = "home" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "hostname" version = "0.3.1" @@ -1431,9 +1470,9 @@ dependencies = [ [[package]] name = "html5gum" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91b361633dcc40096d01de35ed535b6089be91880be47b6fd8f560497af7f716" +checksum = "b3918b5f36d61861b757261da986b51be562c7a87ac4e531d4158e67e08bff72" dependencies = [ "jetscii", ] @@ -1451,9 +1490,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -1478,7 +1517,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http 1.2.0", ] [[package]] @@ -1489,16 +1528,16 @@ checksum = 
"793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "pin-project-lite", ] [[package]] name = "httparse" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" [[package]] name = "httpdate" @@ -1508,15 +1547,14 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "httparse", @@ -1532,15 +1570,15 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.6", - "http 1.1.0", + "h2", + "http 1.2.0", "http-body 1.0.1", "httparse", "itoa", @@ -1552,34 +1590,21 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.3" +version = "0.27.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" dependencies = [ "futures-util", - "http 1.1.0", - "hyper 1.5.0", + "http 1.2.0", + "hyper 1.6.0", "hyper-util", - "rustls 0.23.16", + "rustls 
0.23.22", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls 0.26.1", "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper 0.14.31", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "hyper-tls" version = "0.6.0" @@ -1588,7 +1613,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.5.0", + "hyper 1.6.0", "hyper-util", "native-tls", "tokio", @@ -1605,9 +1630,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", - "hyper 1.5.0", + "hyper 1.6.0", "pin-project-lite", "socket2", "tokio", @@ -1762,26 +1787,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "1.0.3" @@ -1805,12 +1810,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", - 
"hashbrown 0.15.1", + "hashbrown 0.15.2", "serde", ] @@ -1834,26 +1839,26 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "is-terminal" -version = "0.4.13" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" dependencies = [ "hermit-abi 0.4.0", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jetscii" @@ -1874,10 +1879,11 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -1922,9 +1928,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "lettre" -version = "0.11.10" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0161e452348e399deb685ba05e55ee116cae9410f4f51fe42d597361444521d9" +checksum = "e882e1489810a45919477602194312b1a7df0e5acc30a6188be7b520268f63f8" dependencies = [ "async-std", "async-trait", @@ -1937,7 +1943,7 @@ dependencies = [ "futures-util", "hostname 0.4.0", "httpdate", - 
"idna 1.0.3", + "idna", "mime", "native-tls", "nom", @@ -1953,9 +1959,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.162" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libm" @@ -1975,9 +1981,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.30.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" +checksum = "ad8935b44e7c13394a179a438e0cebba0fe08fe01b54f152e29a93b5cf993fd4" dependencies = [ "cc", "pkg-config", @@ -1992,15 +1998,15 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "litrs" @@ -2020,9 +2026,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" dependencies = [ "value-bag", ] @@ -2051,6 +2057,14 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "macros" +version = "0.1.0" +dependencies = [ + "quote", 
+ "syn", +] + [[package]] name = "match_cfg" version = "0.1.0" @@ -2116,22 +2130,21 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi 0.3.9", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -2144,7 +2157,7 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http 1.1.0", + "http 1.2.0", "httparse", "memchr", "mime", @@ -2156,9 +2169,9 @@ dependencies = [ [[package]] name = "mysqlclient-sys" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478e2040dbc35c73927b77a2be91a496de19deab376a6982ed61e89592434619" +checksum = "6bbb9b017b98c4cde5802998113e182eecc1ebce8d47e9ea1697b9a623d53870" dependencies = [ "pkg-config", "vcpkg", @@ -2166,9 +2179,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c" dependencies = [ "libc", "log", @@ -2294,9 +2307,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -2309,11 +2322,11 @@ checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "openssl" -version = "0.10.68" +version = "0.10.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" +checksum = "61cfb4e166a8bb8c9b55c500bc2308550148ece889be90f609377e58140f42c6" dependencies = [ - "bitflags 2.6.0", + "bitflags", "cfg-if", "foreign-types", "libc", @@ -2335,24 +2348,24 @@ dependencies = [ [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-src" -version = "300.4.0+3.4.0" +version = "300.4.1+3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a709e02f2b4aca747929cca5ed248880847c650233cf8b8cdc48f40aaf4898a6" +checksum = "faa4eac4138c62414b5622d1b31c5c304f34b406b013c079c2bbc652fdd6678c" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.104" +version = "0.9.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +checksum = "8b22d5b84be05a8d6947c7cb71f7c849aa0f112acd4bf51c2a7c1c988ac0a9dc" dependencies = [ "cc", "libc", @@ -2412,7 +2425,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2463,20 +2476,20 @@ checksum = 
"e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror", + "thiserror 2.0.11", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d214365f632b123a47fd913301e14c946c61d1c183ee245fa76eb752e59a02dd" +checksum = "816518421cfc6887a0d62bf441b6ffb4536fcc926395a69e1a85852d4363f57e" dependencies = [ "pest", "pest_generator", @@ -2484,9 +2497,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb55586734301717aea2ac313f50b2eb8f60d2fc3dc01d190eefa2e625f60c4e" +checksum = "7d1396fd3a870fc7838768d171b4616d5c91f6cc25e377b673d714567d99377b" dependencies = [ "pest", "pest_meta", @@ -2497,9 +2510,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75da2a70cf4d9cb76833c990ac9cd3923c9a8905a8929789ce347c84564d03d" +checksum = "e1e58089ea25d717bfd31fb534e4f3afcc2cc569c70de3e239778991ea3b7dea" dependencies = [ "once_cell", "pest", @@ -2508,9 +2521,9 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ "phf_macros", "phf_shared", @@ -2518,9 +2531,9 @@ dependencies = [ [[package]] name = "phf_codegen" -version = "0.11.2" +version = 
"0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a" +checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" dependencies = [ "phf_generator", "phf_shared", @@ -2528,19 +2541,19 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", - "rand", + "rand 0.8.5", ] [[package]] name = "phf_macros" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" dependencies = [ "phf_generator", "phf_shared", @@ -2551,9 +2564,9 @@ dependencies = [ [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ "siphasher", ] @@ -2566,9 +2579,9 @@ checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2610,9 +2623,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "powerfmt" @@ -2626,23 +2639,24 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] name = "pq-sys" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6cc05d7ea95200187117196eee9edd0644424911821aeb28a18ce60ea0b8793" +checksum = "30b51d65ebe1cb1f40641b15abae017fed35ccdda46e3dab1ff8768f625a3222" dependencies = [ + "libc", "vcpkg", ] [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -2668,34 +2682,34 @@ checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" [[package]] name = "psm" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa37f80ca58604976033fae9515a8a2989fc13797d953f7c04fb8fa36a11f205" +checksum = "200b9ff220857e53e184257720a14553b2f4aa02577d2ed9842d45d4b9654810" dependencies = [ "cc", ] [[package]] name = "publicsuffix" -version = "2.2.3" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96a8c1bda5ae1af7f99a2962e49df150414a43d62404644d98dd5c3a93d07457" +checksum = "6f42ea446cab60335f76979ec15e12619a2165b5ae2c12166bef27d283a9fadf" dependencies = [ - "idna 0.3.0", + "idna", "psl-types", ] [[package]] name = "quanta" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +checksum = "3bd1fe6824cea6538803de3ff1bc0cf3949024db3d43c9643024bfb33a807c0e" dependencies = [ "crossbeam-utils", "libc", "once_cell", "raw-cpuid", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "web-sys", "winapi", ] @@ -2708,9 +2722,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -2739,8 +2753,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.0", + "zerocopy 0.8.14", ] [[package]] @@ -2750,7 +2775,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.0", ] [[package]] @@ -2759,25 +2794,35 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + 
"getrandom 0.2.15", +] + +[[package]] +name = "rand_core" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +dependencies = [ + "getrandom 0.3.1", + "zerocopy 0.8.14", ] [[package]] name = "raw-cpuid" -version = "11.2.0" +version = "11.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" +checksum = "c6928fa44c097620b706542d428957635951bade7143269085389d42c8a4927e" dependencies = [ - "bitflags 2.6.0", + "bitflags", ] [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.6.0", + "bitflags", ] [[package]] @@ -2808,7 +2853,7 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -2823,9 +2868,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -2857,49 +2902,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.27" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", 
- "hyper 0.14.31", - "hyper-tls 0.5.0", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile 1.0.4", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration 0.5.1", - "tokio", - "tokio-native-tls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - -[[package]] -name = "reqwest" -version = "0.12.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ "async-compression", "base64 0.22.1", @@ -2907,15 +2912,16 @@ dependencies = [ "cookie", "cookie_store", "encoding_rs", + "futures-channel", "futures-core", "futures-util", - "h2 0.4.6", - "http 1.1.0", + "h2", + "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.0", + "hyper 1.6.0", "hyper-rustls", - "hyper-tls 0.6.0", + "hyper-tls", "hyper-util", "ipnet", "js-sys", @@ -2929,12 +2935,13 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", - "system-configuration 0.6.1", + "sync_wrapper", + "system-configuration", "tokio", "tokio-native-tls", "tokio-socks", "tokio-util", + "tower", "tower-service", "url", "wasm-bindgen", @@ -2962,7 +2969,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ -3011,7 +3018,7 @@ dependencies = [ "num_cpus", "parking_lot", "pin-project-lite", - "rand", + "rand 0.8.5", "ref-cast", "rocket_codegen", "rocket_http", @@ -3055,7 +3062,7 @@ dependencies = [ "either", "futures", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "indexmap", "log", "memchr", @@ -3114,15 +3121,15 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" 
[[package]] name = "rustix" -version = "0.38.40" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.6.0", + "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3139,9 +3146,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.16" +version = "0.23.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" dependencies = [ "once_cell", "rustls-pki-types", @@ -3170,9 +3177,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" [[package]] name = "rustls-webpki" @@ -3197,15 +3204,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" [[package]] name = "same-file" @@ -3218,9 +3225,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -3262,7 +3269,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.6.0", + "bitflags", "core-foundation", "core-foundation-sys", "libc", @@ -3271,9 +3278,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -3281,15 +3288,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" [[package]] name = "serde" -version = "1.0.214" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -3306,9 +3313,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.214" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", @@ -3317,9 +3324,9 @@ 
dependencies = [ [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" dependencies = [ "itoa", "memchr", @@ -3406,21 +3413,21 @@ dependencies = [ [[package]] name = "simple_asn1" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 2.0.11", "time", ] [[package]] name = "siphasher" -version = "0.3.11" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "slab" @@ -3439,9 +3446,9 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -3513,9 +3520,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.87" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -3524,15 +3531,9 @@ dependencies = 
[ [[package]] name = "sync_wrapper" -version = "0.1.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - -[[package]] -name = "sync_wrapper" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] @@ -3550,47 +3551,25 @@ dependencies = [ [[package]] name = "syslog" -version = "6.1.1" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc7e95b5b795122fafe6519e27629b5ab4232c73ebb2428f568e82b1a457ad3" +checksum = "019f1500a13379b7d051455df397c75770de6311a7a188a699499502704d9f10" dependencies = [ - "error-chain", - "hostname 0.3.1", + "hostname 0.4.0", "libc", "log", "time", ] -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys 0.5.0", -] - [[package]] name = "system-configuration" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.6.0", + "bitflags", "core-foundation", - "system-configuration-sys 0.6.0", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", + "system-configuration-sys", ] [[package]] @@ -3605,12 +3584,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.14.0" +version = 
"3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" dependencies = [ "cfg-if", "fastrand", + "getrandom 0.3.1", "once_cell", "rustix", "windows-sys 0.59.0", @@ -3622,7 +3602,16 @@ version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +dependencies = [ + "thiserror-impl 2.0.11", ] [[package]] @@ -3636,6 +3625,17 @@ dependencies = [ "syn", ] +[[package]] +name = "thiserror-impl" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "thread_local" version = "1.1.8" @@ -3657,9 +3657,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -3680,9 +3680,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ 
"num-conv", "time-core", @@ -3700,9 +3700,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -3715,9 +3715,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -3733,9 +3733,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", @@ -3764,12 +3764,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls 0.23.16", - "rustls-pki-types", + "rustls 0.23.22", "tokio", ] @@ -3781,15 +3780,15 @@ checksum = "0d4770b8024672c1101b3f6733eab95b18007dbe0847a8afe341fcf79e06043f" dependencies = [ "either", "futures-util", - "thiserror", + "thiserror 1.0.69", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -3810,9 +3809,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -3844,9 +3843,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ "indexmap", "serde", @@ -3867,6 +3866,27 @@ dependencies = [ "sha2", ] +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + [[package]] name = "tower-service" version = "0.3.3" @@ -3875,9 +3895,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ 
"log", "pin-project-lite", @@ -3887,9 +3907,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", @@ -3898,9 +3918,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -3919,9 +3939,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -3950,12 +3970,12 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.1.0", + "http 1.2.0", "httparse", "log", - "rand", + "rand 0.8.5", "sha1", - "thiserror", + "thiserror 1.0.69", "url", "utf-8", ] @@ -3991,26 +4011,11 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicode-bidi" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" - [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" - -[[package]] -name = "unicode-normalization" -version = "0.1.24" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - "tinyvec", -] +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" [[package]] name = "unicode-xid" @@ -4026,12 +4031,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 1.0.3", + "idna", "percent-encoding", "serde", ] @@ -4056,18 +4061,18 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.11.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "value-bag" @@ -4090,7 +4095,9 @@ dependencies = [ "dashmap", "data-encoding", "data-url", + "derive_more", "diesel", + "diesel-derive-newtype", "diesel_logger", "diesel_migrations", "dotenvy", @@ -4107,6 +4114,7 @@ dependencies = [ "lettre", "libsqlite3-sys", "log", + "macros", "mimalloc", "num-derive", "num-traits", @@ -4115,9 +4123,9 @@ dependencies = [ "paste", "percent-encoding", "pico-args", - "rand", + "rand 0.9.0", "regex", - "reqwest 0.12.9", + "reqwest", "ring", 
"rmpv", "rocket", @@ -4176,25 +4184,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] -name = "wasm-bindgen" -version = "0.2.95" +name = "wasi" +version = "0.13.3+wasi-0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", "syn", @@ -4203,21 +4220,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", 
"wasm-bindgen-macro-support", @@ -4225,9 +4243,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", @@ -4238,9 +4256,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "wasm-streams" @@ -4257,9 +4278,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.72" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -4284,24 +4305,24 @@ dependencies = [ "base64 0.13.1", "nom", "openssl", - "rand", + "rand 0.8.5", "serde", "serde_cbor", "serde_derive", "serde_json", - "thiserror", + "thiserror 1.0.69", "tracing", "url", ] [[package]] name = "which" -version = "7.0.0" +version = "7.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9cad3279ade7346b96e38731a641d7343dd6a53d55083dd54eadfa5a1b38c6b" +checksum = "fb4a9e33648339dc1642b0e36e21b3385e6148e289226f657c809dee59df5028" dependencies = [ "either", - "home", + "env_home", "rustix", "winsafe", ] @@ -4551,9 +4572,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.20" +version = "0.7.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" dependencies = [ "memchr", ] @@ -4574,6 +4595,15 @@ version = "0.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags", +] + [[package]] name = "write16" version = "1.0.0" @@ -4597,9 +4627,9 @@ dependencies = [ [[package]] name = "yoke" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" dependencies = [ "serde", "stable_deref_trait", @@ -4609,9 +4639,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", @@ -4621,16 +4651,15 @@ dependencies = [ [[package]] name = "yubico" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "173f75d2c4010429a2d74ae3a114a69930c59e2b1a4c97b1c75d259a4960d5fb" +version = "0.12.0" +source = "git+https://github.com/BlackDex/yubico-rs?rev=00df14811f58155c0f02e3ab10f1570ed3e115c6#00df14811f58155c0f02e3ab10f1570ed3e115c6" dependencies = [ - "base64 0.13.1", + "base64 0.22.1", "form_urlencoded", "futures", "hmac", - "rand", - "reqwest 0.11.27", + "rand 0.8.5", + 
"reqwest", "sha1", "threadpool", ] @@ -4642,7 +4671,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a367f292d93d4eab890745e75a778da40909cab4d6ff8173693812f79c4a2468" +dependencies = [ + "zerocopy-derive 0.8.14", ] [[package]] @@ -4657,19 +4695,30 @@ dependencies = [ ] [[package]] -name = "zerofrom" -version = "0.1.4" +name = "zerocopy-derive" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +checksum = "d3931cb58c62c13adec22e38686b559c86a30565e16ad6e8510a337cedc611e1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 150b3b9d..2542e3c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,11 @@ +workspace = { members = ["macros"] } + [package] name = "vaultwarden" version = "1.0.0" authors = ["Daniel García "] edition = "2021" -rust-version = "1.80.0" +rust-version = "1.83.0" resolver = "2" repository = "https://github.com/dani-garcia/vaultwarden" @@ -36,13 +38,15 @@ unstable = [] [target."cfg(unix)".dependencies] # Logging 
-syslog = "6.1.1" +syslog = "7.0.0" [dependencies] +macros = { path = "./macros" } + # Logging -log = "0.4.22" -fern = { version = "0.7.0", features = ["syslog-6", "reopen-1"] } -tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work +log = "0.4.25" +fern = { version = "0.7.1", features = ["syslog-7", "reopen-1"] } +tracing = { version = "0.1.41", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work # A `dotenv` implementation for Rust dotenvy = { version = "0.15.7", default-features = false } @@ -53,7 +57,7 @@ once_cell = "1.20.2" # Numerical libraries num-traits = "0.2.19" num-derive = "0.4.2" -bigdecimal = "0.4.6" +bigdecimal = "0.4.7" # Web framework rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false } @@ -67,37 +71,40 @@ dashmap = "6.1.0" # Async futures futures = "0.3.31" -tokio = { version = "1.41.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] } +tokio = { version = "1.43.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] } # A generic serialization/deserialization framework -serde = { version = "1.0.214", features = ["derive"] } -serde_json = "1.0.132" +serde = { version = "1.0.217", features = ["derive"] } +serde_json = "1.0.138" # A safe, extensible ORM and Query builder -diesel = { version = "2.2.4", features = ["chrono", "r2d2", "numeric"] } +diesel = { version = "2.2.7", features = ["chrono", "r2d2", "numeric"] } diesel_migrations = "2.2.0" -diesel_logger = { version = "0.3.0", optional = true } +diesel_logger = { version = "0.4.0", optional = true } + +derive_more = { version = "2.0.0", features = ["from", "into", "as_ref", "deref", "display"] } +diesel-derive-newtype = "2.1.2" # Bundled/Static SQLite -libsqlite3-sys = { version = "0.30.1", features = ["bundled"], optional = true } +libsqlite3-sys = { version = "0.31.0", features = ["bundled"], 
optional = true } # Crypto-related libraries -rand = { version = "0.8.5", features = ["small_rng"] } +rand = "0.9.0" ring = "0.17.8" # UUID generation -uuid = { version = "1.11.0", features = ["v4"] } +uuid = { version = "1.12.1", features = ["v4"] } # Date and time libraries -chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false } -chrono-tz = "0.10.0" -time = "0.3.36" +chrono = { version = "0.4.39", features = ["clock", "serde"], default-features = false } +chrono-tz = "0.10.1" +time = "0.3.37" # Job scheduler job_scheduler_ng = "2.0.5" # Data encoding library Hex/Base32/Base64 -data-encoding = "2.6.0" +data-encoding = "2.7.0" # JWT library jsonwebtoken = "9.3.0" @@ -106,31 +113,31 @@ jsonwebtoken = "9.3.0" totp-lite = "2.0.1" # Yubico Library -yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false } +yubico = { version = "0.12.0", features = ["online-tokio"], default-features = false } # WebAuthn libraries webauthn-rs = "0.3.2" # Handling of URL's for WebAuthn and favicons -url = "2.5.3" +url = "2.5.4" # Email libraries -lettre = { version = "0.11.10", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false } +lettre = { version = "0.11.12", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false } percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails email_address = "0.2.9" # HTML Template library -handlebars = { version = "6.2.0", features = ["dir_source"] } +handlebars = { version = "6.3.0", features = ["dir_source"] } # HTTP client (Used for favicons, version check, DUO and HIBP API) -reqwest = { version = "0.12.9", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] } -hickory-resolver = "0.24.1" +reqwest = { version = "0.12.12", features = 
["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] } +hickory-resolver = "0.24.2" # Favicon extraction libraries -html5gum = "0.6.1" +html5gum = "0.7.0" regex = { version = "1.11.1", features = ["std", "perf", "unicode-perl"], default-features = false } data-url = "0.3.1" -bytes = "1.8.0" +bytes = "1.10.0" # Cache function results (Used for version check and favicon fetching) cached = { version = "0.54.0", features = ["async"] } @@ -140,22 +147,22 @@ cookie = "0.18.1" cookie_store = "0.21.1" # Used by U2F, JWT and PostgreSQL -openssl = "0.10.68" +openssl = "0.10.70" # CLI argument parsing pico-args = "0.5.0" # Macro ident concatenation paste = "1.0.15" -governor = "0.7.0" +governor = "0.8.0" # Check client versions for specific features. -semver = "1.0.23" +semver = "1.0.25" # Allow overriding the default memory allocator # Mainly used for the musl builds, since the default musl malloc is very slow mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true } -which = "7.0.0" +which = "7.0.1" # Argon2 library with support for the PHC format argon2 = "0.5.3" @@ -166,6 +173,10 @@ rpassword = "7.3.1" # Loading a dynamic CSS Stylesheet grass_compiler = { version = "0.13.4", default-features = false } +[patch.crates-io] +# Patch yubico to remove duplicate crates of older versions +yubico = { git = "https://github.com/BlackDex/yubico-rs", rev = "00df14811f58155c0f02e3ab10f1570ed3e115c6" } + # Strip debuginfo from the release builds # The symbols are the provide better panic traces # Also enable fat LTO and use 1 codegen unit for optimizations @@ -216,7 +227,8 @@ noop_method_call = "deny" refining_impl_trait = { level = "deny", priority = -1 } rust_2018_idioms = { level = "deny", priority = -1 } rust_2021_compatibility = { level = "deny", priority = -1 } -# rust_2024_compatibility = { level = "deny", priority = -1 } # Enable once we are at MSRV 1.81.0 +rust_2024_compatibility = { level = "deny", priority = -1 
} +edition_2024_expr_fragment_specifier = "allow" # Once changed to Rust 2024 this should be removed and macro's should be validated again single_use_lifetimes = "deny" trivial_casts = "deny" trivial_numeric_casts = "deny" @@ -225,9 +237,10 @@ unused_import_braces = "deny" unused_lifetimes = "deny" unused_qualifications = "deny" variant_size_differences = "deny" -# The lints below are part of the rust_2024_compatibility group -static-mut-refs = "deny" -unsafe-op-in-unsafe-fn = "deny" +# Allow the following lints since these cause issues with Rust v1.84.0 or newer +# Building Vaultwarden with Rust v1.85.0 and edition 2024 also works without issues +if_let_rescope = "allow" +tail_expr_drop_order = "allow" # https://rust-lang.github.io/rust-clippy/stable/index.html [lints.clippy] diff --git a/SECURITY.md b/SECURITY.md index 0917981c..4d23e51c 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -21,7 +21,7 @@ notify us. We welcome working with you to resolve the issue promptly. Thanks in The following bug classes are out-of scope: - Bugs that are already reported on Vaultwarden's issue tracker (https://github.com/dani-garcia/vaultwarden/issues) -- Bugs that are not part of Vaultwarden, like on the the web-vault or mobile and desktop clients. These issues need to be reported in the respective project issue tracker at https://github.com/bitwarden to which we are not associated +- Bugs that are not part of Vaultwarden, like on the web-vault or mobile and desktop clients. 
These issues need to be reported in the respective project issue tracker at https://github.com/bitwarden to which we are not associated - Issues in an upstream software dependency (ex: Rust, or External Libraries) which are already reported to the upstream maintainer - Attacks requiring physical access to a user's device - Issues related to software or protocols not under Vaultwarden's control diff --git a/docker/DockerSettings.yaml b/docker/DockerSettings.yaml index c4c541fb..84ca87ba 100644 --- a/docker/DockerSettings.yaml +++ b/docker/DockerSettings.yaml @@ -1,13 +1,13 @@ --- -vault_version: "v2024.6.2c" -vault_image_digest: "sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b" -# Cross Compile Docker Helper Scripts v1.5.0 +vault_version: "v2025.1.1" +vault_image_digest: "sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918" +# Cross Compile Docker Helper Scripts v1.6.1 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts # https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags -xx_image_digest: "sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa" -rust_version: 1.82.0 # Rust version to be used +xx_image_digest: "sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894" +rust_version: 1.84.1 # Rust version to be used debian_version: bookworm # Debian release name to be used -alpine_version: "3.20" # Alpine version to be used +alpine_version: "3.21" # Alpine version to be used # For which platforms/architectures will we try to build images platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"] # Determine the build images per OS/Arch diff --git a/docker/Dockerfile.alpine b/docker/Dockerfile.alpine index c6c85003..2ea9967b 100644 --- a/docker/Dockerfile.alpine +++ b/docker/Dockerfile.alpine @@ -19,23 +19,23 @@ # - From https://hub.docker.com/r/vaultwarden/web-vault/tags, # click the tag 
name to view the digest of the image it currently points to. # - From the command line: -# $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c -# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c -# [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b] +# $ docker pull docker.io/vaultwarden/web-vault:v2025.1.1 +# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.1.1 +# [docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918] # # - Conversely, to get the tag name from the digest: -# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b -# [docker.io/vaultwarden/web-vault:v2024.6.2c] +# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918 +# [docker.io/vaultwarden/web-vault:v2025.1.1] # -FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault +FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918 AS vault ########################## ALPINE BUILD IMAGES ########################## ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64 ## And for Alpine we define all build images here, they will only be loaded when actually used -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.82.0 AS build_amd64 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.82.0 AS build_arm64 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.82.0 AS build_armv7 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.82.0 AS 
build_armv6 +FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.84.1 AS build_amd64 +FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.84.1 AS build_arm64 +FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.84.1 AS build_armv7 +FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.84.1 AS build_armv6 ########################## BUILD IMAGE ########################## # hadolint ignore=DL3006 @@ -76,6 +76,7 @@ RUN source /env-cargo && \ # Copies over *only* your manifests and build files COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./ +COPY ./macros ./macros ARG CARGO_PROFILE=release @@ -126,7 +127,7 @@ RUN source /env-cargo && \ # To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*' # # We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742 -FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.20 +FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.21 ENV ROCKET_PROFILE="release" \ ROCKET_ADDRESS=0.0.0.0 \ diff --git a/docker/Dockerfile.debian b/docker/Dockerfile.debian index eb502eb2..52f98526 100644 --- a/docker/Dockerfile.debian +++ b/docker/Dockerfile.debian @@ -19,24 +19,24 @@ # - From https://hub.docker.com/r/vaultwarden/web-vault/tags, # click the tag name to view the digest of the image it currently points to. 
# - From the command line: -# $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c -# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c -# [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b] +# $ docker pull docker.io/vaultwarden/web-vault:v2025.1.1 +# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.1.1 +# [docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918] # # - Conversely, to get the tag name from the digest: -# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b -# [docker.io/vaultwarden/web-vault:v2024.6.2c] +# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918 +# [docker.io/vaultwarden/web-vault:v2025.1.1] # -FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault +FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918 AS vault ########################## Cross Compile Docker Helper Scripts ########################## ## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts ## And these bash scripts do not have any significant difference if at all -FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa AS xx +FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894 AS xx ########################## BUILD IMAGE ########################## # hadolint ignore=DL3006 -FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.82.0-slim-bookworm AS build 
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.84.1-slim-bookworm AS build COPY --from=xx / / ARG TARGETARCH ARG TARGETVARIANT @@ -116,6 +116,7 @@ RUN source /env-cargo && \ # Copies over *only* your manifests and build files COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./ +COPY ./macros ./macros ARG CARGO_PROFILE=release diff --git a/docker/Dockerfile.j2 b/docker/Dockerfile.j2 index 372be95e..e64252e2 100644 --- a/docker/Dockerfile.j2 +++ b/docker/Dockerfile.j2 @@ -143,6 +143,7 @@ RUN source /env-cargo && \ # Copies over *only* your manifests and build files COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./ +COPY ./macros ./macros ARG CARGO_PROFILE=release diff --git a/docker/README.md b/docker/README.md index 2e78f534..f76cd35d 100644 --- a/docker/README.md +++ b/docker/README.md @@ -46,7 +46,7 @@ There also is an option to use an other docker container to provide support for ```bash # To install and activate docker run --privileged --rm tonistiigi/binfmt --install arm64,arm -# To unistall +# To uninstall docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*' ``` diff --git a/docker/docker-bake.hcl b/docker/docker-bake.hcl index 38e7ef97..2edf4fbb 100644 --- a/docker/docker-bake.hcl +++ b/docker/docker-bake.hcl @@ -17,7 +17,7 @@ variable "SOURCE_REPOSITORY_URL" { default = null } -// The commit hash of of the current commit this build was triggered on +// The commit hash of the current commit this build was triggered on variable "SOURCE_COMMIT" { default = null } diff --git a/macros/Cargo.toml b/macros/Cargo.toml new file mode 100644 index 00000000..323f198d --- /dev/null +++ b/macros/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "macros" +version = "0.1.0" +edition = "2021" + +[lib] +name = "macros" +path = "src/lib.rs" +proc-macro = true + +[dependencies] +quote = "1.0.38" +syn = "2.0.98" diff --git a/macros/src/lib.rs b/macros/src/lib.rs new file mode 100644 index 00000000..ec8863bb --- /dev/null +++ b/macros/src/lib.rs @@ -0,0 
+1,58 @@ +extern crate proc_macro; + +use proc_macro::TokenStream; +use quote::quote; + +#[proc_macro_derive(UuidFromParam)] +pub fn derive_uuid_from_param(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + + impl_derive_uuid_macro(&ast) +} + +fn impl_derive_uuid_macro(ast: &syn::DeriveInput) -> TokenStream { + let name = &ast.ident; + let gen = quote! { + #[automatically_derived] + impl<'r> rocket::request::FromParam<'r> for #name { + type Error = (); + + #[inline(always)] + fn from_param(param: &'r str) -> Result { + if uuid::Uuid::parse_str(param).is_ok() { + Ok(Self(param.to_string())) + } else { + Err(()) + } + } + } + }; + gen.into() +} + +#[proc_macro_derive(IdFromParam)] +pub fn derive_id_from_param(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + + impl_derive_safestring_macro(&ast) +} + +fn impl_derive_safestring_macro(ast: &syn::DeriveInput) -> TokenStream { + let name = &ast.ident; + let gen = quote! { + #[automatically_derived] + impl<'r> rocket::request::FromParam<'r> for #name { + type Error = (); + + #[inline(always)] + fn from_param(param: &'r str) -> Result { + if param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) { + Ok(Self(param.to_string())) + } else { + Err(()) + } + } + } + }; + gen.into() +} diff --git a/migrations/mysql/2025-01-09-172300_add_manage/down.sql b/migrations/mysql/2025-01-09-172300_add_manage/down.sql new file mode 100644 index 00000000..e69de29b diff --git a/migrations/mysql/2025-01-09-172300_add_manage/up.sql b/migrations/mysql/2025-01-09-172300_add_manage/up.sql new file mode 100644 index 00000000..e234cc6e --- /dev/null +++ b/migrations/mysql/2025-01-09-172300_add_manage/up.sql @@ -0,0 +1,5 @@ +ALTER TABLE users_collections +ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE; + +ALTER TABLE collections_groups +ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/migrations/postgresql/2025-01-09-172300_add_manage/down.sql 
b/migrations/postgresql/2025-01-09-172300_add_manage/down.sql new file mode 100644 index 00000000..e69de29b diff --git a/migrations/postgresql/2025-01-09-172300_add_manage/up.sql b/migrations/postgresql/2025-01-09-172300_add_manage/up.sql new file mode 100644 index 00000000..e234cc6e --- /dev/null +++ b/migrations/postgresql/2025-01-09-172300_add_manage/up.sql @@ -0,0 +1,5 @@ +ALTER TABLE users_collections +ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE; + +ALTER TABLE collections_groups +ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/migrations/sqlite/2025-01-09-172300_add_manage/down.sql b/migrations/sqlite/2025-01-09-172300_add_manage/down.sql new file mode 100644 index 00000000..e69de29b diff --git a/migrations/sqlite/2025-01-09-172300_add_manage/up.sql b/migrations/sqlite/2025-01-09-172300_add_manage/up.sql new file mode 100644 index 00000000..4b4b07a5 --- /dev/null +++ b/migrations/sqlite/2025-01-09-172300_add_manage/up.sql @@ -0,0 +1,5 @@ +ALTER TABLE users_collections +ADD COLUMN manage BOOLEAN NOT NULL DEFAULT 0; -- FALSE + +ALTER TABLE collections_groups +ADD COLUMN manage BOOLEAN NOT NULL DEFAULT 0; -- FALSE diff --git a/rust-toolchain.toml b/rust-toolchain.toml index a74a99f4..5506e541 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.82.0" +channel = "1.84.1" components = [ "rustfmt", "clippy" ] profile = "minimal" diff --git a/src/api/admin.rs b/src/api/admin.rs index cc902e39..b3e703d9 100644 --- a/src/api/admin.rs +++ b/src/api/admin.rs @@ -50,7 +50,7 @@ pub fn routes() -> Vec { disable_user, enable_user, remove_2fa, - update_user_org_type, + update_membership_type, update_revision_users, post_config, delete_config, @@ -62,6 +62,7 @@ pub fn routes() -> Vec { diagnostics, get_diagnostics_config, resend_user_invite, + get_diagnostics_http, ] } @@ -98,6 +99,7 @@ const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z"; const BASE_TEMPLATE: &str = "admin/base"; const ACTING_ADMIN_USER: &str = 
"vaultwarden-admin-00000-000000000000"; +pub const FAKE_ADMIN_UUID: &str = "00000000-0000-0000-0000-000000000000"; fn admin_path() -> String { format!("{}{}", CONFIG.domain_path(), ADMIN_PATH) @@ -169,7 +171,7 @@ struct LoginForm { redirect: Option, } -#[post("/", data = "")] +#[post("/", format = "application/x-www-form-urlencoded", data = "")] fn post_admin_login( data: Form, cookies: &CookieJar<'_>, @@ -279,15 +281,15 @@ struct InviteData { email: String, } -async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult { - if let Some(user) = User::find_by_uuid(uuid, conn).await { +async fn get_user_or_404(user_id: &UserId, conn: &mut DbConn) -> ApiResult { + if let Some(user) = User::find_by_uuid(user_id, conn).await { Ok(user) } else { err_code!("User doesn't exist", Status::NotFound.code); } } -#[post("/invite", data = "")] +#[post("/invite", format = "application/json", data = "")] async fn invite_user(data: Json, _token: AdminToken, mut conn: DbConn) -> JsonResult { let data: InviteData = data.into_inner(); if User::find_by_mail(&data.email, &mut conn).await.is_some() { @@ -298,7 +300,9 @@ async fn invite_user(data: Json, _token: AdminToken, mut conn: DbCon async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult { if CONFIG.mail_enabled() { - mail::send_invite(user, None, None, &CONFIG.invitation_org_name(), None).await + let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into(); + let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into(); + mail::send_invite(user, org_id, member_id, &CONFIG.invitation_org_name(), None).await } else { let invitation = Invitation::new(&user.email); invitation.save(conn).await @@ -311,7 +315,7 @@ async fn invite_user(data: Json, _token: AdminToken, mut conn: DbCon Ok(Json(user.to_json(&mut conn).await)) } -#[post("/test/smtp", data = "")] +#[post("/test/smtp", format = "application/json", data = "")] async fn test_smtp(data: Json, _token: AdminToken) -> EmptyResult { let data: InviteData = 
data.into_inner(); @@ -380,29 +384,29 @@ async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) } } -#[get("/users/")] -async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult { - let u = get_user_or_404(uuid, &mut conn).await?; +#[get("/users/")] +async fn get_user_json(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> JsonResult { + let u = get_user_or_404(&user_id, &mut conn).await?; let mut usr = u.to_json(&mut conn).await; usr["userEnabled"] = json!(u.enabled); usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT)); Ok(Json(usr)) } -#[post("/users//delete")] -async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult { - let user = get_user_or_404(uuid, &mut conn).await?; +#[post("/users//delete", format = "application/json")] +async fn delete_user(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult { + let user = get_user_or_404(&user_id, &mut conn).await?; - // Get the user_org records before deleting the actual user - let user_orgs = UserOrganization::find_any_state_by_user(uuid, &mut conn).await; + // Get the membership records before deleting the actual user + let memberships = Membership::find_any_state_by_user(&user_id, &mut conn).await; let res = user.delete(&mut conn).await; - for user_org in user_orgs { + for membership in memberships { log_event( - EventType::OrganizationUserRemoved as i32, - &user_org.uuid, - &user_org.org_uuid, - ACTING_ADMIN_USER, + EventType::OrganizationUserDeleted as i32, + &membership.uuid, + &membership.org_uuid, + &ACTING_ADMIN_USER.into(), 14, // Use UnknownBrowser type &token.ip.ip, &mut conn, @@ -413,9 +417,9 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyRe res } -#[post("/users//deauth")] -async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - let mut user = get_user_or_404(uuid, &mut conn).await?; 
+#[post("/users//deauth", format = "application/json")] +async fn deauth_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { + let mut user = get_user_or_404(&user_id, &mut conn).await?; nt.send_logout(&user, None).await; @@ -434,9 +438,9 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notif user.save(&mut conn).await } -#[post("/users//disable")] -async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - let mut user = get_user_or_404(uuid, &mut conn).await?; +#[post("/users//disable", format = "application/json")] +async fn disable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { + let mut user = get_user_or_404(&user_id, &mut conn).await?; Device::delete_all_by_user(&user.uuid, &mut conn).await?; user.reset_security_stamp(); user.enabled = false; @@ -448,33 +452,35 @@ async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Noti save_result } -#[post("/users//enable")] -async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult { - let mut user = get_user_or_404(uuid, &mut conn).await?; +#[post("/users//enable", format = "application/json")] +async fn enable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> EmptyResult { + let mut user = get_user_or_404(&user_id, &mut conn).await?; user.enabled = true; user.save(&mut conn).await } -#[post("/users//remove-2fa")] -async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult { - let mut user = get_user_or_404(uuid, &mut conn).await?; +#[post("/users//remove-2fa", format = "application/json")] +async fn remove_2fa(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult { + let mut user = get_user_or_404(&user_id, &mut conn).await?; TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?; - two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, 
&token.ip.ip, &mut conn).await?; + two_factor::enforce_2fa_policy(&user, &ACTING_ADMIN_USER.into(), 14, &token.ip.ip, &mut conn).await?; user.totp_recover = None; user.save(&mut conn).await } -#[post("/users//invite/resend")] -async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult { - if let Some(user) = User::find_by_uuid(uuid, &mut conn).await { +#[post("/users//invite/resend", format = "application/json")] +async fn resend_user_invite(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> EmptyResult { + if let Some(user) = User::find_by_uuid(&user_id, &mut conn).await { //TODO: replace this with user.status check when it will be available (PR#3397) if !user.password_hash.is_empty() { err_code!("User already accepted invitation", Status::BadRequest.code); } if CONFIG.mail_enabled() { - mail::send_invite(&user, None, None, &CONFIG.invitation_org_name(), None).await + let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into(); + let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into(); + mail::send_invite(&user, org_id, member_id, &CONFIG.invitation_org_name(), None).await } else { Ok(()) } @@ -484,42 +490,41 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) -> } #[derive(Debug, Deserialize)] -struct UserOrgTypeData { +struct MembershipTypeData { user_type: NumberOrString, - user_uuid: String, - org_uuid: String, + user_uuid: UserId, + org_uuid: OrganizationId, } -#[post("/users/org_type", data = "")] -async fn update_user_org_type(data: Json, token: AdminToken, mut conn: DbConn) -> EmptyResult { - let data: UserOrgTypeData = data.into_inner(); +#[post("/users/org_type", format = "application/json", data = "")] +async fn update_membership_type(data: Json, token: AdminToken, mut conn: DbConn) -> EmptyResult { + let data: MembershipTypeData = data.into_inner(); - let mut user_to_edit = - match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await { 
- Some(user) => user, - None => err!("The specified user isn't member of the organization"), - }; + let Some(mut member_to_edit) = Membership::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await + else { + err!("The specified user isn't member of the organization") + }; - let new_type = match UserOrgType::from_str(&data.user_type.into_string()) { + let new_type = match MembershipType::from_str(&data.user_type.into_string()) { Some(new_type) => new_type as i32, None => err!("Invalid type"), }; - if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner { + if member_to_edit.atype == MembershipType::Owner && new_type != MembershipType::Owner { // Removing owner permission, check that there is at least one other confirmed owner - if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &mut conn).await <= 1 { + if Membership::count_confirmed_by_org_and_type(&data.org_uuid, MembershipType::Owner, &mut conn).await <= 1 { err!("Can't change the type of the last owner") } } - // This check is also done at api::organizations::{accept_invite(), _confirm_invite, _activate_user(), edit_user()}, update_user_org_type + // This check is also done at api::organizations::{accept_invite, _confirm_invite, _activate_member, edit_member}, update_membership_type // It returns different error messages per function. 
- if new_type < UserOrgType::Admin { - match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await { + if new_type < MembershipType::Admin { + match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &member_to_edit.org_uuid, true, &mut conn).await { Ok(_) => {} Err(OrgPolicyErr::TwoFactorMissing) => { if CONFIG.email_2fa_auto_fallback() { - two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?; + two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &mut conn).await?; } else { err!("You cannot modify this user to this type because they have not setup 2FA"); } @@ -532,20 +537,20 @@ async fn update_user_org_type(data: Json, token: AdminToken, mu log_event( EventType::OrganizationUserUpdated as i32, - &user_to_edit.uuid, + &member_to_edit.uuid, &data.org_uuid, - ACTING_ADMIN_USER, + &ACTING_ADMIN_USER.into(), 14, // Use UnknownBrowser type &token.ip.ip, &mut conn, ) .await; - user_to_edit.atype = new_type; - user_to_edit.save(&mut conn).await + member_to_edit.atype = new_type; + member_to_edit.save(&mut conn).await } -#[post("/users/update_revision")] +#[post("/users/update_revision", format = "application/json")] async fn update_revision_users(_token: AdminToken, mut conn: DbConn) -> EmptyResult { User::update_all_revisions(&mut conn).await } @@ -556,7 +561,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu let mut organizations_json = Vec::with_capacity(organizations.len()); for o in organizations { let mut org = o.to_json(); - org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &mut conn).await); + org["user_count"] = json!(Membership::count_by_org(&o.uuid, &mut conn).await); org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await); org["collection_count"] = json!(Collection::count_by_org(&o.uuid, &mut conn).await); org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await); @@ 
-570,9 +575,9 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu Ok(Html(text)) } -#[post("/organizations//delete")] -async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult { - let org = Organization::find_by_uuid(uuid, &mut conn).await.map_res("Organization doesn't exist")?; +#[post("/organizations//delete", format = "application/json")] +async fn delete_organization(org_id: OrganizationId, _token: AdminToken, mut conn: DbConn) -> EmptyResult { + let org = Organization::find_by_uuid(&org_id, &mut conn).await.map_res("Organization doesn't exist")?; org.delete(&mut conn).await } @@ -601,9 +606,8 @@ async fn get_json_api(url: &str) -> Result { } async fn has_http_access() -> bool { - let req = match make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") { - Ok(r) => r, - Err(_) => return false, + let Ok(req) = make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") else { + return false; }; match req.send().await { Ok(r) => r.status().is_success(), @@ -713,6 +717,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn) "ip_header_name": ip_header_name, "ip_header_config": &CONFIG.ip_header(), "uses_proxy": uses_proxy, + "enable_websocket": &CONFIG.enable_websocket(), "db_type": *DB_TYPE, "db_version": get_sql_server_version(&mut conn).await, "admin_url": format!("{}/diagnostics", admin_url()), @@ -728,22 +733,27 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn) Ok(Html(text)) } -#[get("/diagnostics/config")] +#[get("/diagnostics/config", format = "application/json")] fn get_diagnostics_config(_token: AdminToken) -> Json { let support_json = CONFIG.get_support_json(); Json(support_json) } -#[post("/config", data = "")] +#[get("/diagnostics/http?")] +fn get_diagnostics_http(code: u16, _token: AdminToken) -> EmptyResult { + err_code!(format!("Testing error {code} response"), code); +} + 
+#[post("/config", format = "application/json", data = "")] fn post_config(data: Json, _token: AdminToken) -> EmptyResult { let data: ConfigBuilder = data.into_inner(); - if let Err(e) = CONFIG.update_config(data) { + if let Err(e) = CONFIG.update_config(data, true) { err!(format!("Unable to save config: {e:?}")) } Ok(()) } -#[post("/config/delete")] +#[post("/config/delete", format = "application/json")] fn delete_config(_token: AdminToken) -> EmptyResult { if let Err(e) = CONFIG.delete_user_config() { err!(format!("Unable to delete config: {e:?}")) @@ -751,7 +761,7 @@ fn delete_config(_token: AdminToken) -> EmptyResult { Ok(()) } -#[post("/config/backup_db")] +#[post("/config/backup_db", format = "application/json")] async fn backup_db(_token: AdminToken, mut conn: DbConn) -> ApiResult { if *CAN_BACKUP { match backup_database(&mut conn).await { diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs index e9a91efa..d15648f7 100644 --- a/src/api/core/accounts.rs +++ b/src/api/core/accounts.rs @@ -30,6 +30,7 @@ pub fn routes() -> Vec { profile, put_profile, post_profile, + put_avatar, get_public_keys, post_keys, post_password, @@ -42,9 +43,8 @@ pub fn routes() -> Vec { post_verify_email_token, post_delete_recover, post_delete_recover_token, - post_device_token, - delete_account, post_delete_account, + delete_account, revision_date, password_hint, prelogin, @@ -52,7 +52,9 @@ pub fn routes() -> Vec { api_key, rotate_api_key, get_known_device, - put_avatar, + get_all_devices, + get_device, + post_device_token, put_device_token, put_clear_device_token, post_clear_device_token, @@ -84,10 +86,10 @@ pub struct RegisterData { name: Option, - #[allow(dead_code)] - organization_user_id: Option, #[serde(alias = "orgInviteToken")] token: Option, + #[allow(dead_code)] + organization_user_id: Option, // Used only from the register/finish endpoint email_verification_token: Option, @@ -117,15 +119,15 @@ fn enforce_password_hint_setting(password_hint: &Option) -> 
EmptyResult } Ok(()) } -async fn is_email_2fa_required(org_user_uuid: Option, conn: &mut DbConn) -> bool { +async fn is_email_2fa_required(member_id: Option, conn: &mut DbConn) -> bool { if !CONFIG._enable_email_2fa() { return false; } if CONFIG.email_2fa_enforce_on_verified_invite() { return true; } - if org_user_uuid.is_some() { - return OrgPolicy::is_enabled_for_member(&org_user_uuid.unwrap(), OrgPolicyType::TwoFactorAuthentication, conn) + if member_id.is_some() { + return OrgPolicy::is_enabled_for_member(&member_id.unwrap(), OrgPolicyType::TwoFactorAuthentication, conn) .await; } false @@ -190,9 +192,9 @@ pub async fn _register(data: Json, email_verification: bool, mut c err!("Registration email does not match invite email") } } else if Invitation::take(&email, &mut conn).await { - for user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() { - user_org.status = UserOrgStatus::Accepted as i32; - user_org.save(&mut conn).await?; + for membership in Membership::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() { + membership.status = MembershipStatus::Accepted as i32; + membership.save(&mut conn).await?; } user } else if CONFIG.is_signup_allowed(&email) @@ -338,9 +340,9 @@ async fn put_avatar(data: Json, headers: Headers, mut conn: DbConn) Ok(Json(user.to_json(&mut conn).await)) } -#[get("/users//public-key")] -async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult { - let user = match User::find_by_uuid(uuid, &mut conn).await { +#[get("/users//public-key")] +async fn get_public_keys(user_id: UserId, _headers: Headers, mut conn: DbConn) -> JsonResult { + let user = match User::find_by_uuid(&user_id, &mut conn).await { Some(user) if user.public_key.is_some() => user, Some(_) => err_code!("User has no public_key", Status::NotFound.code), None => err_code!("User doesn't exist", Status::NotFound.code), @@ -399,7 +401,12 @@ async fn post_password(data: Json, headers: Headers, mut conn: D 
&data.new_master_password_hash, Some(data.key), true, - Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]), + Some(vec![ + String::from("post_rotatekey"), + String::from("get_contacts"), + String::from("get_public_keys"), + String::from("get_api_webauthn"), + ]), ); let save_result = user.save(&mut conn).await; @@ -407,7 +414,7 @@ async fn post_password(data: Json, headers: Headers, mut conn: D // Prevent logging out the client where the user requested this endpoint from. // If you do logout the user it will causes issues at the client side. // Adding the device uuid will prevent this. - nt.send_logout(&user, Some(headers.device.uuid)).await; + nt.send_logout(&user, Some(headers.device.uuid.clone())).await; save_result } @@ -467,7 +474,7 @@ async fn post_kdf(data: Json, headers: Headers, mut conn: DbConn, user.set_password(&data.new_master_password_hash, Some(data.key), true, None); let save_result = user.save(&mut conn).await; - nt.send_logout(&user, Some(headers.device.uuid)).await; + nt.send_logout(&user, Some(headers.device.uuid.clone())).await; save_result } @@ -478,21 +485,21 @@ struct UpdateFolderData { // There is a bug in 2024.3.x which adds a `null` item. 
// To bypass this we allow a Option here, but skip it during the updates // See: https://github.com/bitwarden/clients/issues/8453 - id: Option, + id: Option, name: String, } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct UpdateEmergencyAccessData { - id: String, + id: EmergencyAccessId, key_encrypted: String, } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct UpdateResetPasswordData { - organization_id: String, + organization_id: OrganizationId, reset_password_key: String, } @@ -517,48 +524,49 @@ fn validate_keydata( existing_ciphers: &[Cipher], existing_folders: &[Folder], existing_emergency_access: &[EmergencyAccess], - existing_user_orgs: &[UserOrganization], + existing_memberships: &[Membership], existing_sends: &[Send], ) -> EmptyResult { // Check that we're correctly rotating all the user's ciphers - let existing_cipher_ids = existing_ciphers.iter().map(|c| c.uuid.as_str()).collect::>(); + let existing_cipher_ids = existing_ciphers.iter().map(|c| &c.uuid).collect::>(); let provided_cipher_ids = data .ciphers .iter() .filter(|c| c.organization_id.is_none()) - .filter_map(|c| c.id.as_deref()) - .collect::>(); + .filter_map(|c| c.id.as_ref()) + .collect::>(); if !provided_cipher_ids.is_superset(&existing_cipher_ids) { err!("All existing ciphers must be included in the rotation") } // Check that we're correctly rotating all the user's folders - let existing_folder_ids = existing_folders.iter().map(|f| f.uuid.as_str()).collect::>(); - let provided_folder_ids = data.folders.iter().filter_map(|f| f.id.as_deref()).collect::>(); + let existing_folder_ids = existing_folders.iter().map(|f| &f.uuid).collect::>(); + let provided_folder_ids = data.folders.iter().filter_map(|f| f.id.as_ref()).collect::>(); if !provided_folder_ids.is_superset(&existing_folder_ids) { err!("All existing folders must be included in the rotation") } // Check that we're correctly rotating all the user's emergency access keys let existing_emergency_access_ids = - 
existing_emergency_access.iter().map(|ea| ea.uuid.as_str()).collect::>(); + existing_emergency_access.iter().map(|ea| &ea.uuid).collect::>(); let provided_emergency_access_ids = - data.emergency_access_keys.iter().map(|ea| ea.id.as_str()).collect::>(); + data.emergency_access_keys.iter().map(|ea| &ea.id).collect::>(); if !provided_emergency_access_ids.is_superset(&existing_emergency_access_ids) { err!("All existing emergency access keys must be included in the rotation") } // Check that we're correctly rotating all the user's reset password keys - let existing_reset_password_ids = existing_user_orgs.iter().map(|uo| uo.org_uuid.as_str()).collect::>(); + let existing_reset_password_ids = + existing_memberships.iter().map(|m| &m.org_uuid).collect::>(); let provided_reset_password_ids = - data.reset_password_keys.iter().map(|rp| rp.organization_id.as_str()).collect::>(); + data.reset_password_keys.iter().map(|rp| &rp.organization_id).collect::>(); if !provided_reset_password_ids.is_superset(&existing_reset_password_ids) { err!("All existing reset password keys must be included in the rotation") } // Check that we're correctly rotating all the user's sends - let existing_send_ids = existing_sends.iter().map(|s| s.uuid.as_str()).collect::>(); - let provided_send_ids = data.sends.iter().filter_map(|s| s.id.as_deref()).collect::>(); + let existing_send_ids = existing_sends.iter().map(|s| &s.uuid).collect::>(); + let provided_send_ids = data.sends.iter().filter_map(|s| s.id.as_ref()).collect::>(); if !provided_send_ids.is_superset(&existing_send_ids) { err!("All existing sends must be included in the rotation") } @@ -581,24 +589,24 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, // TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks. 
Cipher::validate_cipher_data(&data.ciphers)?; - let user_uuid = &headers.user.uuid; + let user_id = &headers.user.uuid; // TODO: Ideally we'd do everything after this point in a single transaction. - let mut existing_ciphers = Cipher::find_owned_by_user(user_uuid, &mut conn).await; - let mut existing_folders = Folder::find_by_user(user_uuid, &mut conn).await; - let mut existing_emergency_access = EmergencyAccess::find_all_by_grantor_uuid(user_uuid, &mut conn).await; - let mut existing_user_orgs = UserOrganization::find_by_user(user_uuid, &mut conn).await; + let mut existing_ciphers = Cipher::find_owned_by_user(user_id, &mut conn).await; + let mut existing_folders = Folder::find_by_user(user_id, &mut conn).await; + let mut existing_emergency_access = EmergencyAccess::find_all_by_grantor_uuid(user_id, &mut conn).await; + let mut existing_memberships = Membership::find_by_user(user_id, &mut conn).await; // We only rotate the reset password key if it is set. - existing_user_orgs.retain(|uo| uo.reset_password_key.is_some()); - let mut existing_sends = Send::find_by_user(user_uuid, &mut conn).await; + existing_memberships.retain(|m| m.reset_password_key.is_some()); + let mut existing_sends = Send::find_by_user(user_id, &mut conn).await; validate_keydata( &data, &existing_ciphers, &existing_folders, &existing_emergency_access, - &existing_user_orgs, + &existing_memberships, &existing_sends, )?; @@ -607,9 +615,8 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, // Skip `null` folder id entries. 
// See: https://github.com/bitwarden/clients/issues/8453 if let Some(folder_id) = folder_data.id { - let saved_folder = match existing_folders.iter_mut().find(|f| f.uuid == folder_id) { - Some(folder) => folder, - None => err!("Folder doesn't exist"), + let Some(saved_folder) = existing_folders.iter_mut().find(|f| f.uuid == folder_id) else { + err!("Folder doesn't exist") }; saved_folder.name = folder_data.name; @@ -619,11 +626,11 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, // Update emergency access data for emergency_access_data in data.emergency_access_keys { - let saved_emergency_access = - match existing_emergency_access.iter_mut().find(|ea| ea.uuid == emergency_access_data.id) { - Some(emergency_access) => emergency_access, - None => err!("Emergency access doesn't exist or is not owned by the user"), - }; + let Some(saved_emergency_access) = + existing_emergency_access.iter_mut().find(|ea| ea.uuid == emergency_access_data.id) + else { + err!("Emergency access doesn't exist or is not owned by the user") + }; saved_emergency_access.key_encrypted = Some(emergency_access_data.key_encrypted); saved_emergency_access.save(&mut conn).await? @@ -631,21 +638,20 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, // Update reset password data for reset_password_data in data.reset_password_keys { - let user_org = match existing_user_orgs.iter_mut().find(|uo| uo.org_uuid == reset_password_data.organization_id) - { - Some(reset_password) => reset_password, - None => err!("Reset password doesn't exist"), + let Some(membership) = + existing_memberships.iter_mut().find(|m| m.org_uuid == reset_password_data.organization_id) + else { + err!("Reset password doesn't exist") }; - user_org.reset_password_key = Some(reset_password_data.reset_password_key); - user_org.save(&mut conn).await? + membership.reset_password_key = Some(reset_password_data.reset_password_key); + membership.save(&mut conn).await? 
} // Update send data for send_data in data.sends { - let send = match existing_sends.iter_mut().find(|s| &s.uuid == send_data.id.as_ref().unwrap()) { - Some(send) => send, - None => err!("Send doesn't exist"), + let Some(send) = existing_sends.iter_mut().find(|s| &s.uuid == send_data.id.as_ref().unwrap()) else { + err!("Send doesn't exist") }; update_send_from_data(send, send_data, &headers, &mut conn, &nt, UpdateType::None).await?; @@ -656,9 +662,9 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, for cipher_data in data.ciphers { if cipher_data.organization_id.is_none() { - let saved_cipher = match existing_ciphers.iter_mut().find(|c| &c.uuid == cipher_data.id.as_ref().unwrap()) { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), + let Some(saved_cipher) = existing_ciphers.iter_mut().find(|c| &c.uuid == cipher_data.id.as_ref().unwrap()) + else { + err!("Cipher doesn't exist") }; // Prevent triggering cipher updates via WebSockets by settings UpdateType::None @@ -680,7 +686,7 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, // Prevent logging out the client where the user requested this endpoint from. // If you do logout the user it will causes issues at the client side. // Adding the device uuid will prevent this. 
- nt.send_logout(&user, Some(headers.device.uuid)).await; + nt.send_logout(&user, Some(headers.device.uuid.clone())).await; save_result } @@ -827,7 +833,7 @@ async fn post_verify_email(headers: Headers) -> EmptyResult { #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct VerifyEmailTokenData { - user_id: String, + user_id: UserId, token: String, } @@ -835,16 +841,14 @@ struct VerifyEmailTokenData { async fn post_verify_email_token(data: Json, mut conn: DbConn) -> EmptyResult { let data: VerifyEmailTokenData = data.into_inner(); - let mut user = match User::find_by_uuid(&data.user_id, &mut conn).await { - Some(user) => user, - None => err!("User doesn't exist"), + let Some(mut user) = User::find_by_uuid(&data.user_id, &mut conn).await else { + err!("User doesn't exist") }; - let claims = match decode_verify_email(&data.token) { - Ok(claims) => claims, - Err(_) => err!("Invalid claim"), + let Ok(claims) = decode_verify_email(&data.token) else { + err!("Invalid claim") }; - if claims.sub != user.uuid { + if claims.sub != *user.uuid { err!("Invalid claim"); } user.verified_at = Some(Utc::now().naive_utc()); @@ -886,7 +890,7 @@ async fn post_delete_recover(data: Json, mut conn: DbConn) -> #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct DeleteRecoverTokenData { - user_id: String, + user_id: UserId, token: String, } @@ -894,16 +898,15 @@ struct DeleteRecoverTokenData { async fn post_delete_recover_token(data: Json, mut conn: DbConn) -> EmptyResult { let data: DeleteRecoverTokenData = data.into_inner(); - let user = match User::find_by_uuid(&data.user_id, &mut conn).await { - Some(user) => user, - None => err!("User doesn't exist"), + let Ok(claims) = decode_delete(&data.token) else { + err!("Invalid claim") }; - let claims = match decode_delete(&data.token) { - Ok(claims) => claims, - Err(_) => err!("Invalid claim"), + let Some(user) = User::find_by_uuid(&data.user_id, &mut conn).await else { + err!("User doesn't exist") }; - if claims.sub != 
user.uuid { + + if claims.sub != *user.uuid { err!("Invalid claim"); } user.delete(&mut conn).await @@ -955,9 +958,9 @@ async fn password_hint(data: Json, mut conn: DbConn) -> EmptyR // paths that send mail take noticeably longer than ones that // don't. Add a randomized sleep to mitigate this somewhat. use rand::{rngs::SmallRng, Rng, SeedableRng}; - let mut rng = SmallRng::from_entropy(); + let mut rng = SmallRng::from_os_rng(); let delta: i32 = 100; - let sleep_ms = (1_000 + rng.gen_range(-delta..=delta)) as u64; + let sleep_ms = (1_000 + rng.random_range(-delta..=delta)) as u64; tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await; Ok(()) } else { @@ -1065,7 +1068,7 @@ async fn get_known_device(device: KnownDevice, mut conn: DbConn) -> JsonResult { struct KnownDevice { email: String, - uuid: String, + uuid: DeviceId, } #[rocket::async_trait] @@ -1074,11 +1077,8 @@ impl<'r> FromRequest<'r> for KnownDevice { async fn from_request(req: &'r Request<'_>) -> Outcome { let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") { - let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) { - Ok(bytes) => bytes, - Err(_) => { - return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url")); - } + let Ok(email_bytes) = data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) else { + return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url")); }; match String::from_utf8(email_bytes) { Ok(email) => email, @@ -1091,7 +1091,7 @@ impl<'r> FromRequest<'r> for KnownDevice { }; let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") { - uuid.to_string() + uuid.to_string().into() } else { return Outcome::Error((Status::BadRequest, "X-Device-Identifier value is required")); }; @@ -1103,32 +1103,57 @@ impl<'r> FromRequest<'r> for KnownDevice { } } +#[get("/devices")] +async fn get_all_devices(headers: Headers, mut conn: 
DbConn) -> JsonResult { + let devices = Device::find_with_auth_request_by_user(&headers.user.uuid, &mut conn).await; + let devices = devices.iter().map(|device| device.to_json()).collect::>(); + + Ok(Json(json!({ + "data": devices, + "continuationToken": null, + "object": "list" + }))) +} + +#[get("/devices/identifier/")] +async fn get_device(device_id: DeviceId, headers: Headers, mut conn: DbConn) -> JsonResult { + let Some(device) = Device::find_by_uuid_and_user(&device_id, &headers.user.uuid, &mut conn).await else { + err!("No device found"); + }; + Ok(Json(device.to_json())) +} + #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct PushToken { push_token: String, } -#[post("/devices/identifier//token", data = "")] -async fn post_device_token(uuid: &str, data: Json, headers: Headers, conn: DbConn) -> EmptyResult { - put_device_token(uuid, data, headers, conn).await +#[post("/devices/identifier//token", data = "")] +async fn post_device_token(device_id: DeviceId, data: Json, headers: Headers, conn: DbConn) -> EmptyResult { + put_device_token(device_id, data, headers, conn).await } -#[put("/devices/identifier//token", data = "")] -async fn put_device_token(uuid: &str, data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { +#[put("/devices/identifier//token", data = "")] +async fn put_device_token( + device_id: DeviceId, + data: Json, + headers: Headers, + mut conn: DbConn, +) -> EmptyResult { let data = data.into_inner(); let token = data.push_token; - let mut device = match Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await { - Some(device) => device, - None => err!(format!("Error: device {uuid} should be present before a token can be assigned")), + let Some(mut device) = Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await + else { + err!(format!("Error: device {device_id} should be present before a token can be assigned")) }; // if the device already has been 
registered if device.is_registered() { // check if the new token is the same as the registered token if device.push_token.is_some() && device.push_token.unwrap() == token.clone() { - debug!("Device {} is already registered and token is the same", uuid); + debug!("Device {} is already registered and token is the same", device_id); return Ok(()); } else { // Try to unregister already registered device @@ -1147,8 +1172,8 @@ async fn put_device_token(uuid: &str, data: Json, headers: Headers, m Ok(()) } -#[put("/devices/identifier//clear-token")] -async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult { +#[put("/devices/identifier//clear-token")] +async fn put_clear_device_token(device_id: DeviceId, mut conn: DbConn) -> EmptyResult { // This only clears push token // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109 // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37 @@ -1157,8 +1182,8 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult { return Ok(()); } - if let Some(device) = Device::find_by_uuid(uuid, &mut conn).await { - Device::clear_push_token_by_uuid(uuid, &mut conn).await?; + if let Some(device) = Device::find_by_uuid(&device_id, &mut conn).await { + Device::clear_push_token_by_uuid(&device_id, &mut conn).await?; unregister_push_device(device.push_uuid).await?; } @@ -1166,16 +1191,16 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult { } // On upstream server, both PUT and POST are declared. 
Implementing the POST method in case it would be useful somewhere -#[post("/devices/identifier//clear-token")] -async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult { - put_clear_device_token(uuid, conn).await +#[post("/devices/identifier//clear-token")] +async fn post_clear_device_token(device_id: DeviceId, conn: DbConn) -> EmptyResult { + put_clear_device_token(device_id, conn).await } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] struct AuthRequestRequest { access_code: String, - device_identifier: String, + device_identifier: DeviceId, email: String, public_key: String, // Not used for now @@ -1192,9 +1217,8 @@ async fn post_auth_request( ) -> JsonResult { let data = data.into_inner(); - let user = match User::find_by_mail(&data.email, &mut conn).await { - Some(user) => user, - None => err!("AuthRequest doesn't exist", "User not found"), + let Some(user) = User::find_by_mail(&data.email, &mut conn).await else { + err!("AuthRequest doesn't exist", "User not found") }; // Validate device uuid and type @@ -1215,6 +1239,15 @@ async fn post_auth_request( nt.send_auth_request(&user.uuid, &auth_request.uuid, &data.device_identifier, &mut conn).await; + log_user_event( + EventType::UserRequestedDeviceApproval as i32, + &user.uuid, + client_headers.device_type, + &client_headers.ip.ip, + &mut conn, + ) + .await; + Ok(Json(json!({ "id": auth_request.uuid, "publicKey": auth_request.public_key, @@ -1230,21 +1263,17 @@ async fn post_auth_request( }))) } -#[get("/auth-requests/")] -async fn get_auth_request(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult { - let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await { - Some(auth_request) => auth_request, - None => err!("AuthRequest doesn't exist", "Record not found"), +#[get("/auth-requests/")] +async fn get_auth_request(auth_request_id: AuthRequestId, headers: Headers, mut conn: DbConn) -> JsonResult { + let Some(auth_request) = 
AuthRequest::find_by_uuid_and_user(&auth_request_id, &headers.user.uuid, &mut conn).await + else { + err!("AuthRequest doesn't exist", "Record not found or user uuid does not match") }; - if headers.user.uuid != auth_request.user_uuid { - err!("AuthRequest doesn't exist", "User uuid's do not match") - } - let response_date_utc = auth_request.response_date.map(|response_date| format_date(&response_date)); Ok(Json(json!({ - "id": uuid, + "id": &auth_request_id, "publicKey": auth_request.public_key, "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(), "requestIpAddress": auth_request.request_ip, @@ -1261,15 +1290,15 @@ async fn get_auth_request(uuid: &str, headers: Headers, mut conn: DbConn) -> Jso #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] struct AuthResponseRequest { - device_identifier: String, + device_identifier: DeviceId, key: String, master_password_hash: Option, request_approved: bool, } -#[put("/auth-requests/", data = "")] +#[put("/auth-requests/", data = "")] async fn put_auth_request( - uuid: &str, + auth_request_id: AuthRequestId, data: Json, headers: Headers, mut conn: DbConn, @@ -1277,15 +1306,12 @@ async fn put_auth_request( nt: Notify<'_>, ) -> JsonResult { let data = data.into_inner(); - let mut auth_request: AuthRequest = match AuthRequest::find_by_uuid(uuid, &mut conn).await { - Some(auth_request) => auth_request, - None => err!("AuthRequest doesn't exist", "Record not found"), + let Some(mut auth_request) = + AuthRequest::find_by_uuid_and_user(&auth_request_id, &headers.user.uuid, &mut conn).await + else { + err!("AuthRequest doesn't exist", "Record not found or user uuid does not match") }; - if headers.user.uuid != auth_request.user_uuid { - err!("AuthRequest doesn't exist", "User uuid's do not match") - } - if auth_request.approved.is_some() { err!("An authentication request with the same device already exists") } @@ -1302,14 +1328,31 @@ async fn put_auth_request( auth_request.save(&mut 
conn).await?; ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await; - nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, data.device_identifier, &mut conn).await; + nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, &data.device_identifier, &mut conn).await; + + log_user_event( + EventType::OrganizationUserApprovedAuthRequest as i32, + &headers.user.uuid, + headers.device.atype, + &headers.ip.ip, + &mut conn, + ) + .await; } else { // If denied, there's no reason to keep the request auth_request.delete(&mut conn).await?; + log_user_event( + EventType::OrganizationUserRejectedAuthRequest as i32, + &headers.user.uuid, + headers.device.atype, + &headers.ip.ip, + &mut conn, + ) + .await; } Ok(Json(json!({ - "id": uuid, + "id": &auth_request_id, "publicKey": auth_request.public_key, "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(), "requestIpAddress": auth_request.request_ip, @@ -1323,16 +1366,15 @@ async fn put_auth_request( }))) } -#[get("/auth-requests//response?")] +#[get("/auth-requests//response?")] async fn get_auth_request_response( - uuid: &str, + auth_request_id: AuthRequestId, code: &str, client_headers: ClientHeaders, mut conn: DbConn, ) -> JsonResult { - let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await { - Some(auth_request) => auth_request, - None => err!("AuthRequest doesn't exist", "User not found"), + let Some(auth_request) = AuthRequest::find_by_uuid(&auth_request_id, &mut conn).await else { + err!("AuthRequest doesn't exist", "User not found") }; if auth_request.device_type != client_headers.device_type @@ -1345,7 +1387,7 @@ async fn get_auth_request_response( let response_date_utc = auth_request.response_date.map(|response_date| format_date(&response_date)); Ok(Json(json!({ - "id": uuid, + "id": &auth_request_id, "publicKey": auth_request.public_key, "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(), 
"requestIpAddress": auth_request.request_ip, diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs index aa390e5e..6c75d246 100644 --- a/src/api/core/ciphers.rs +++ b/src/api/core/ciphers.rs @@ -191,11 +191,10 @@ async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json { })) } -#[get("/ciphers/")] -async fn get_cipher(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult { - let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), +#[get("/ciphers/")] +async fn get_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn) -> JsonResult { + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + err!("Cipher doesn't exist") }; if !cipher.is_accessible_to_user(&headers.user.uuid, &mut conn).await { @@ -205,27 +204,27 @@ async fn get_cipher(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResul Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await)) } -#[get("/ciphers//admin")] -async fn get_cipher_admin(uuid: &str, headers: Headers, conn: DbConn) -> JsonResult { +#[get("/ciphers//admin")] +async fn get_cipher_admin(cipher_id: CipherId, headers: Headers, conn: DbConn) -> JsonResult { // TODO: Implement this correctly - get_cipher(uuid, headers, conn).await + get_cipher(cipher_id, headers, conn).await } -#[get("/ciphers//details")] -async fn get_cipher_details(uuid: &str, headers: Headers, conn: DbConn) -> JsonResult { - get_cipher(uuid, headers, conn).await +#[get("/ciphers//details")] +async fn get_cipher_details(cipher_id: CipherId, headers: Headers, conn: DbConn) -> JsonResult { + get_cipher(cipher_id, headers, conn).await } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct CipherData { // Id is optional as it is included only in bulk share - pub id: Option, + pub id: Option, // Folder id is not included in import - pub folder_id: Option, + pub folder_id: 
Option, // TODO: Some of these might appear all the time, no need for Option #[serde(alias = "organizationID")] - pub organization_id: Option, + pub organization_id: Option, key: Option, @@ -257,7 +256,7 @@ pub struct CipherData { // 'Attachments' is unused, contains map of {id: filename} #[allow(dead_code)] attachments: Option, - attachments2: Option>, + attachments2: Option>, // The revision datetime (in ISO 8601 format) of the client's local copy // of the cipher. This is used to prevent a client from updating a cipher @@ -271,7 +270,7 @@ pub struct CipherData { #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct PartialCipherData { - folder_id: Option, + folder_id: Option, favorite: bool, } @@ -356,9 +355,9 @@ async fn enforce_personal_ownership_policy( conn: &mut DbConn, ) -> EmptyResult { if data.is_none() || data.unwrap().organization_id.is_none() { - let user_uuid = &headers.user.uuid; + let user_id = &headers.user.uuid; let policy_type = OrgPolicyType::PersonalOwnership; - if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, None, conn).await { + if OrgPolicy::is_applicable_to_user(user_id, policy_type, None, conn).await { err!("Due to an Enterprise Policy, you are restricted from saving items to your personal vault.") } } @@ -369,7 +368,7 @@ pub async fn update_cipher_from_data( cipher: &mut Cipher, data: CipherData, headers: &Headers, - shared_to_collections: Option>, + shared_to_collections: Option>, conn: &mut DbConn, nt: &Notify<'_>, ut: UpdateType, @@ -406,11 +405,11 @@ pub async fn update_cipher_from_data( let transfer_cipher = cipher.organization_uuid.is_none() && data.organization_id.is_some(); if let Some(org_id) = data.organization_id { - match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await { + match Membership::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await { None => err!("You don't have permission to add item to organization"), - Some(org_user) => { + Some(member) 
=> { if shared_to_collections.is_some() - || org_user.has_full_access() + || member.has_full_access() || cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { cipher.organization_uuid = Some(org_id); @@ -430,13 +429,8 @@ pub async fn update_cipher_from_data( } if let Some(ref folder_id) = data.folder_id { - match Folder::find_by_uuid(folder_id, conn).await { - Some(folder) => { - if folder.user_uuid != headers.user.uuid { - err!("Folder is not owned by user") - } - } - None => err!("Folder doesn't exist"), + if Folder::find_by_uuid_and_user(folder_id, &headers.user.uuid, conn).await.is_none() { + err!("Invalid folder", "Folder does not exist or belongs to another user"); } } @@ -511,7 +505,7 @@ pub async fn update_cipher_from_data( cipher.fields = data.fields.map(|f| _clean_cipher_data(f).to_string()); cipher.data = type_data.to_string(); cipher.password_history = data.password_history.map(|f| f.to_string()); - cipher.reprompt = data.reprompt; + cipher.reprompt = data.reprompt.filter(|r| *r == RepromptType::None as i32 || *r == RepromptType::Password as i32); cipher.save(conn).await?; cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?; @@ -519,7 +513,7 @@ pub async fn update_cipher_from_data( if ut != UpdateType::None { // Only log events for organizational ciphers - if let Some(org_uuid) = &cipher.organization_uuid { + if let Some(org_id) = &cipher.organization_uuid { let event_type = match (&ut, transfer_cipher) { (UpdateType::SyncCipherCreate, true) => EventType::CipherCreated, (UpdateType::SyncCipherUpdate, true) => EventType::CipherShared, @@ -529,7 +523,7 @@ pub async fn update_cipher_from_data( log_event( event_type as i32, &cipher.uuid, - org_uuid, + org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -585,11 +579,11 @@ async fn post_ciphers_import( Cipher::validate_cipher_data(&data.ciphers)?; // Read and create the folders - let existing_folders: HashSet> = + let existing_folders: HashSet> = 
Folder::find_by_user(&headers.user.uuid, &mut conn).await.into_iter().map(|f| Some(f.uuid)).collect(); - let mut folders: Vec = Vec::with_capacity(data.folders.len()); + let mut folders: Vec = Vec::with_capacity(data.folders.len()); for folder in data.folders.into_iter() { - let folder_uuid = if existing_folders.contains(&folder.id) { + let folder_id = if existing_folders.contains(&folder.id) { folder.id.unwrap() } else { let mut new_folder = Folder::new(headers.user.uuid.clone(), folder.name); @@ -597,7 +591,7 @@ async fn post_ciphers_import( new_folder.uuid }; - folders.push(folder_uuid); + folders.push(folder_id); } // Read the relations between folders and ciphers @@ -609,8 +603,8 @@ async fn post_ciphers_import( // Read and create the ciphers for (index, mut cipher_data) in data.ciphers.into_iter().enumerate() { - let folder_uuid = relations_map.get(&index).map(|i| folders[*i].clone()); - cipher_data.folder_id = folder_uuid; + let folder_id = relations_map.get(&index).map(|i| folders[*i].clone()); + cipher_data.folder_id = folder_id; let mut cipher = Cipher::new(cipher_data.r#type, cipher_data.name.clone()); update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await?; @@ -624,36 +618,42 @@ async fn post_ciphers_import( } /// Called when an org admin modifies an existing org cipher. 
-#[put("/ciphers//admin", data = "")] +#[put("/ciphers//admin", data = "")] async fn put_cipher_admin( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - put_cipher(uuid, data, headers, conn, nt).await + put_cipher(cipher_id, data, headers, conn, nt).await } -#[post("/ciphers//admin", data = "")] +#[post("/ciphers//admin", data = "")] async fn post_cipher_admin( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - post_cipher(uuid, data, headers, conn, nt).await + post_cipher(cipher_id, data, headers, conn, nt).await } -#[post("/ciphers/", data = "")] -async fn post_cipher(uuid: &str, data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { - put_cipher(uuid, data, headers, conn, nt).await +#[post("/ciphers/", data = "")] +async fn post_cipher( + cipher_id: CipherId, + data: Json, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { + put_cipher(cipher_id, data, headers, conn, nt).await } -#[put("/ciphers/", data = "")] +#[put("/ciphers/", data = "")] async fn put_cipher( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, mut conn: DbConn, @@ -661,9 +661,8 @@ async fn put_cipher( ) -> JsonResult { let data: CipherData = data.into_inner(); - let mut cipher = match Cipher::find_by_uuid(uuid, &mut conn).await { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), + let Some(mut cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + err!("Cipher doesn't exist") }; // TODO: Check if only the folder ID or favorite status is being changed. 
@@ -680,34 +679,33 @@ async fn put_cipher( Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await)) } -#[post("/ciphers//partial", data = "")] -async fn post_cipher_partial(uuid: &str, data: Json, headers: Headers, conn: DbConn) -> JsonResult { - put_cipher_partial(uuid, data, headers, conn).await +#[post("/ciphers//partial", data = "")] +async fn post_cipher_partial( + cipher_id: CipherId, + data: Json, + headers: Headers, + conn: DbConn, +) -> JsonResult { + put_cipher_partial(cipher_id, data, headers, conn).await } // Only update the folder and favorite for the user, since this cipher is read-only -#[put("/ciphers//partial", data = "")] +#[put("/ciphers//partial", data = "")] async fn put_cipher_partial( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, mut conn: DbConn, ) -> JsonResult { let data: PartialCipherData = data.into_inner(); - let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + err!("Cipher doesn't exist") }; if let Some(ref folder_id) = data.folder_id { - match Folder::find_by_uuid(folder_id, &mut conn).await { - Some(folder) => { - if folder.user_uuid != headers.user.uuid { - err!("Folder is not owned by user") - } - } - None => err!("Folder doesn't exist"), + if Folder::find_by_uuid_and_user(folder_id, &headers.user.uuid, &mut conn).await.is_none() { + err!("Invalid folder", "Folder does not exist or belongs to another user"); } } @@ -723,29 +721,29 @@ async fn put_cipher_partial( #[serde(rename_all = "camelCase")] struct CollectionsAdminData { #[serde(alias = "CollectionIds")] - collection_ids: Vec, + collection_ids: Vec, } -#[put("/ciphers//collections_v2", data = "")] +#[put("/ciphers//collections_v2", data = "")] async fn put_collections2_update( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, conn: 
DbConn, nt: Notify<'_>, ) -> JsonResult { - post_collections2_update(uuid, data, headers, conn, nt).await + post_collections2_update(cipher_id, data, headers, conn, nt).await } -#[post("/ciphers//collections_v2", data = "")] +#[post("/ciphers//collections_v2", data = "")] async fn post_collections2_update( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - let cipher_details = post_collections_update(uuid, data, headers, conn, nt).await?; + let cipher_details = post_collections_update(cipher_id, data, headers, conn, nt).await?; Ok(Json(json!({ // AttachmentUploadDataResponseModel "object": "optionalCipherDetails", "unavailable": false, @@ -753,20 +751,20 @@ async fn post_collections2_update( }))) } -#[put("/ciphers//collections", data = "")] +#[put("/ciphers//collections", data = "")] async fn put_collections_update( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - post_collections_update(uuid, data, headers, conn, nt).await + post_collections_update(cipher_id, data, headers, conn, nt).await } -#[post("/ciphers//collections", data = "")] +#[post("/ciphers//collections", data = "")] async fn post_collections_update( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, mut conn: DbConn, @@ -774,21 +772,21 @@ async fn post_collections_update( ) -> JsonResult { let data: CollectionsAdminData = data.into_inner(); - let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + err!("Cipher doesn't exist") }; if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { err!("Cipher is not write accessible") } - let posted_collections = HashSet::::from_iter(data.collection_ids); + let posted_collections = HashSet::::from_iter(data.collection_ids); let 
current_collections = - HashSet::::from_iter(cipher.get_collections(headers.user.uuid.clone(), &mut conn).await); + HashSet::::from_iter(cipher.get_collections(headers.user.uuid.clone(), &mut conn).await); for collection in posted_collections.symmetric_difference(¤t_collections) { - match Collection::find_by_uuid(collection, &mut conn).await { + match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &mut conn).await + { None => err!("Invalid collection ID provided"), Some(collection) => { if collection.is_writable_by_user(&headers.user.uuid, &mut conn).await { @@ -830,20 +828,20 @@ async fn post_collections_update( Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await)) } -#[put("/ciphers//collections-admin", data = "")] +#[put("/ciphers//collections-admin", data = "")] async fn put_collections_admin( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - post_collections_admin(uuid, data, headers, conn, nt).await + post_collections_admin(cipher_id, data, headers, conn, nt).await } -#[post("/ciphers//collections-admin", data = "")] +#[post("/ciphers//collections-admin", data = "")] async fn post_collections_admin( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, mut conn: DbConn, @@ -851,21 +849,21 @@ async fn post_collections_admin( ) -> EmptyResult { let data: CollectionsAdminData = data.into_inner(); - let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + err!("Cipher doesn't exist") }; if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { err!("Cipher is not write accessible") } - let posted_collections = HashSet::::from_iter(data.collection_ids); + let posted_collections = HashSet::::from_iter(data.collection_ids); 
let current_collections = - HashSet::::from_iter(cipher.get_admin_collections(headers.user.uuid.clone(), &mut conn).await); + HashSet::::from_iter(cipher.get_admin_collections(headers.user.uuid.clone(), &mut conn).await); for collection in posted_collections.symmetric_difference(¤t_collections) { - match Collection::find_by_uuid(collection, &mut conn).await { + match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &mut conn).await + { None => err!("Invalid collection ID provided"), Some(collection) => { if collection.is_writable_by_user(&headers.user.uuid, &mut conn).await { @@ -913,12 +911,12 @@ struct ShareCipherData { #[serde(alias = "Cipher")] cipher: CipherData, #[serde(alias = "CollectionIds")] - collection_ids: Vec, + collection_ids: Vec, } -#[post("/ciphers//share", data = "")] +#[post("/ciphers//share", data = "")] async fn post_cipher_share( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, mut conn: DbConn, @@ -926,12 +924,12 @@ async fn post_cipher_share( ) -> JsonResult { let data: ShareCipherData = data.into_inner(); - share_cipher_by_uuid(uuid, data, &headers, &mut conn, &nt).await + share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt).await } -#[put("/ciphers//share", data = "")] +#[put("/ciphers//share", data = "")] async fn put_cipher_share( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, mut conn: DbConn, @@ -939,14 +937,14 @@ async fn put_cipher_share( ) -> JsonResult { let data: ShareCipherData = data.into_inner(); - share_cipher_by_uuid(uuid, data, &headers, &mut conn, &nt).await + share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt).await } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct ShareSelectedCipherData { ciphers: Vec, - collection_ids: Vec, + collection_ids: Vec, } #[put("/ciphers/share", data = "")] @@ -988,13 +986,13 @@ async fn put_cipher_share_selected( } async fn share_cipher_by_uuid( - uuid: &str, + cipher_id: 
&CipherId, data: ShareCipherData, headers: &Headers, conn: &mut DbConn, nt: &Notify<'_>, ) -> JsonResult { - let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { + let mut cipher = match Cipher::find_by_uuid(cipher_id, conn).await { Some(cipher) => { if cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { cipher @@ -1007,9 +1005,9 @@ async fn share_cipher_by_uuid( let mut shared_to_collections = vec![]; - if let Some(organization_uuid) = &data.cipher.organization_id { - for uuid in &data.collection_ids { - match Collection::find_by_uuid_and_org(uuid, organization_uuid, conn).await { + if let Some(organization_id) = &data.cipher.organization_id { + for col_id in &data.collection_ids { + match Collection::find_by_uuid_and_org(col_id, organization_id, conn).await { None => err!("Invalid collection ID provided"), Some(collection) => { if collection.is_writable_by_user(&headers.user.uuid, conn).await { @@ -1041,19 +1039,23 @@ async fn share_cipher_by_uuid( /// Upstream added this v2 API to support direct download of attachments from /// their object storage service. For self-hosted instances, it basically just /// redirects to the same location as before the v2 API. 
-#[get("/ciphers//attachment/")] -async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { - let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), +#[get("/ciphers//attachment/")] +async fn get_attachment( + cipher_id: CipherId, + attachment_id: AttachmentId, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + err!("Cipher doesn't exist") }; if !cipher.is_accessible_to_user(&headers.user.uuid, &mut conn).await { err!("Cipher is not accessible") } - match Attachment::find_by_id(attachment_id, &mut conn).await { - Some(attachment) if uuid == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))), + match Attachment::find_by_id(&attachment_id, &mut conn).await { + Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))), Some(_) => err!("Attachment doesn't belong to cipher"), None => err!("Attachment doesn't exist"), } @@ -1077,16 +1079,15 @@ enum FileUploadType { /// This redirects the client to the API it should use to upload the attachment. /// For upstream's cloud-hosted service, it's an Azure object storage API. /// For self-hosted instances, it's another API on the local instance. 
-#[post("/ciphers//attachment/v2", data = "")] +#[post("/ciphers//attachment/v2", data = "")] async fn post_attachment_v2( - uuid: &str, + cipher_id: CipherId, data: Json, headers: Headers, mut conn: DbConn, ) -> JsonResult { - let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + err!("Cipher doesn't exist") }; if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { @@ -1135,7 +1136,7 @@ struct UploadData<'f> { /// database record, which is passed in as `attachment`. async fn save_attachment( mut attachment: Option, - cipher_uuid: &str, + cipher_id: CipherId, data: Form>, headers: &Headers, mut conn: DbConn, @@ -1150,9 +1151,8 @@ async fn save_attachment( err!("Attachment size can't be negative") } - let cipher = match Cipher::find_by_uuid(cipher_uuid, &mut conn).await { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + err!("Cipher doesn't exist") }; if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { @@ -1166,11 +1166,11 @@ async fn save_attachment( Some(a) => a.file_size, // v2 API }; - let size_limit = if let Some(ref user_uuid) = cipher.user_uuid { + let size_limit = if let Some(ref user_id) = cipher.user_uuid { match CONFIG.user_attachment_limit() { Some(0) => err!("Attachments are disabled"), Some(limit_kb) => { - let already_used = Attachment::size_by_user(user_uuid, &mut conn).await; + let already_used = Attachment::size_by_user(user_id, &mut conn).await; let left = limit_kb .checked_mul(1024) .and_then(|l| l.checked_sub(already_used)) @@ -1188,11 +1188,11 @@ async fn save_attachment( } None => None, } - } else if let Some(ref org_uuid) = cipher.organization_uuid { + } else if let Some(ref org_id) = cipher.organization_uuid { match 
CONFIG.org_attachment_limit() { Some(0) => err!("Attachments are disabled"), Some(limit_kb) => { - let already_used = Attachment::size_by_org(org_uuid, &mut conn).await; + let already_used = Attachment::size_by_org(org_id, &mut conn).await; let left = limit_kb .checked_mul(1024) .and_then(|l| l.checked_sub(already_used)) @@ -1265,12 +1265,12 @@ async fn save_attachment( err!("No attachment key provided") } let attachment = - Attachment::new(file_id.clone(), String::from(cipher_uuid), encrypted_filename.unwrap(), size, data.key); + Attachment::new(file_id.clone(), cipher_id.clone(), encrypted_filename.unwrap(), size, data.key); attachment.save(&mut conn).await.expect("Error saving attachment"); } - let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_uuid); - let file_path = folder_path.join(&file_id); + let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_id.as_ref()); + let file_path = folder_path.join(file_id.as_ref()); tokio::fs::create_dir_all(&folder_path).await?; if let Err(_err) = data.data.persist_to(&file_path).await { @@ -1287,11 +1287,11 @@ async fn save_attachment( ) .await; - if let Some(org_uuid) = &cipher.organization_uuid { + if let Some(org_id) = &cipher.organization_uuid { log_event( EventType::CipherAttachmentCreated as i32, &cipher.uuid, - org_uuid, + org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -1305,32 +1305,32 @@ async fn save_attachment( /// v2 API for uploading the actual data content of an attachment. /// This route needs a rank specified so that Rocket prioritizes the -/// /ciphers//attachment/v2 route, which would otherwise conflict +/// /ciphers//attachment/v2 route, which would otherwise conflict /// with this one. 
-#[post("/ciphers//attachment/", format = "multipart/form-data", data = "", rank = 1)] +#[post("/ciphers//attachment/", format = "multipart/form-data", data = "", rank = 1)] async fn post_attachment_v2_data( - uuid: &str, - attachment_id: &str, + cipher_id: CipherId, + attachment_id: AttachmentId, data: Form>, headers: Headers, mut conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - let attachment = match Attachment::find_by_id(attachment_id, &mut conn).await { - Some(attachment) if uuid == attachment.cipher_uuid => Some(attachment), + let attachment = match Attachment::find_by_id(&attachment_id, &mut conn).await { + Some(attachment) if cipher_id == attachment.cipher_uuid => Some(attachment), Some(_) => err!("Attachment doesn't belong to cipher"), None => err!("Attachment doesn't exist"), }; - save_attachment(attachment, uuid, data, &headers, conn, nt).await?; + save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?; Ok(()) } /// Legacy API for creating an attachment associated with a cipher. -#[post("/ciphers//attachment", format = "multipart/form-data", data = "")] +#[post("/ciphers//attachment", format = "multipart/form-data", data = "")] async fn post_attachment( - uuid: &str, + cipher_id: CipherId, data: Form>, headers: Headers, conn: DbConn, @@ -1340,111 +1340,121 @@ async fn post_attachment( // the attachment database record as well as saving the data to disk. 
let attachment = None; - let (cipher, mut conn) = save_attachment(attachment, uuid, data, &headers, conn, nt).await?; + let (cipher, mut conn) = save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?; Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await)) } -#[post("/ciphers//attachment-admin", format = "multipart/form-data", data = "")] +#[post("/ciphers//attachment-admin", format = "multipart/form-data", data = "")] async fn post_attachment_admin( - uuid: &str, + cipher_id: CipherId, data: Form>, headers: Headers, conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - post_attachment(uuid, data, headers, conn, nt).await + post_attachment(cipher_id, data, headers, conn, nt).await } -#[post("/ciphers//attachment//share", format = "multipart/form-data", data = "")] +#[post("/ciphers//attachment//share", format = "multipart/form-data", data = "")] async fn post_attachment_share( - uuid: &str, - attachment_id: &str, + cipher_id: CipherId, + attachment_id: AttachmentId, data: Form>, headers: Headers, mut conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - _delete_cipher_attachment_by_id(uuid, attachment_id, &headers, &mut conn, &nt).await?; - post_attachment(uuid, data, headers, conn, nt).await + _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await?; + post_attachment(cipher_id, data, headers, conn, nt).await } -#[post("/ciphers//attachment//delete-admin")] +#[post("/ciphers//attachment//delete-admin")] async fn delete_attachment_post_admin( - uuid: &str, - attachment_id: &str, + cipher_id: CipherId, + attachment_id: AttachmentId, headers: Headers, conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - delete_attachment(uuid, attachment_id, headers, conn, nt).await + delete_attachment(cipher_id, attachment_id, headers, conn, nt).await } -#[post("/ciphers//attachment//delete")] +#[post("/ciphers//attachment//delete")] async fn delete_attachment_post( - uuid: &str, - 
attachment_id: &str, + cipher_id: CipherId, + attachment_id: AttachmentId, headers: Headers, conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - delete_attachment(uuid, attachment_id, headers, conn, nt).await + delete_attachment(cipher_id, attachment_id, headers, conn, nt).await } -#[delete("/ciphers//attachment/")] +#[delete("/ciphers//attachment/")] async fn delete_attachment( - uuid: &str, - attachment_id: &str, + cipher_id: CipherId, + attachment_id: AttachmentId, headers: Headers, mut conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - _delete_cipher_attachment_by_id(uuid, attachment_id, &headers, &mut conn, &nt).await + _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await } -#[delete("/ciphers//attachment//admin")] +#[delete("/ciphers//attachment//admin")] async fn delete_attachment_admin( - uuid: &str, - attachment_id: &str, + cipher_id: CipherId, + attachment_id: AttachmentId, headers: Headers, mut conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - _delete_cipher_attachment_by_id(uuid, attachment_id, &headers, &mut conn, &nt).await + _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await } -#[post("/ciphers//delete")] -async fn delete_cipher_post(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await +#[post("/ciphers//delete")] +async fn delete_cipher_post(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, false, &nt).await // permanent delete } -#[post("/ciphers//delete-admin")] -async fn delete_cipher_post_admin(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await +#[post("/ciphers//delete-admin")] +async fn delete_cipher_post_admin( + cipher_id: CipherId, + headers: Headers, + mut conn: DbConn, 
+ nt: Notify<'_>, +) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, false, &nt).await // permanent delete } -#[put("/ciphers//delete")] -async fn delete_cipher_put(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(uuid, &headers, &mut conn, true, &nt).await +#[put("/ciphers//delete")] +async fn delete_cipher_put(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, true, &nt).await // soft delete } -#[put("/ciphers//delete-admin")] -async fn delete_cipher_put_admin(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(uuid, &headers, &mut conn, true, &nt).await +#[put("/ciphers//delete-admin")] +async fn delete_cipher_put_admin( + cipher_id: CipherId, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, true, &nt).await } -#[delete("/ciphers/")] -async fn delete_cipher(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await +#[delete("/ciphers/")] +async fn delete_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, false, &nt).await // permanent delete } -#[delete("/ciphers//admin")] -async fn delete_cipher_admin(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await +#[delete("/ciphers//admin")] +async fn delete_cipher_admin(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, false, &nt).await // permanent delete } @@ -1508,14 +1518,19 @@ async fn 
delete_cipher_selected_put_admin( _delete_multiple_ciphers(data, headers, conn, true, nt).await // soft delete } -#[put("/ciphers//restore")] -async fn restore_cipher_put(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { - _restore_cipher_by_uuid(uuid, &headers, &mut conn, &nt).await +#[put("/ciphers//restore")] +async fn restore_cipher_put(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { + _restore_cipher_by_uuid(&cipher_id, &headers, &mut conn, &nt).await } -#[put("/ciphers//restore-admin")] -async fn restore_cipher_put_admin(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { - _restore_cipher_by_uuid(uuid, &headers, &mut conn, &nt).await +#[put("/ciphers//restore-admin")] +async fn restore_cipher_put_admin( + cipher_id: CipherId, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { + _restore_cipher_by_uuid(&cipher_id, &headers, &mut conn, &nt).await } #[put("/ciphers/restore", data = "")] @@ -1531,8 +1546,8 @@ async fn restore_cipher_selected( #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct MoveCipherData { - folder_id: Option, - ids: Vec, + folder_id: Option, + ids: Vec, } #[post("/ciphers/move", data = "")] @@ -1543,36 +1558,30 @@ async fn move_cipher_selected( nt: Notify<'_>, ) -> EmptyResult { let data = data.into_inner(); - let user_uuid = headers.user.uuid; + let user_id = headers.user.uuid; if let Some(ref folder_id) = data.folder_id { - match Folder::find_by_uuid(folder_id, &mut conn).await { - Some(folder) => { - if folder.user_uuid != user_uuid { - err!("Folder is not owned by user") - } - } - None => err!("Folder doesn't exist"), + if Folder::find_by_uuid_and_user(folder_id, &user_id, &mut conn).await.is_none() { + err!("Invalid folder", "Folder does not exist or belongs to another user"); } } - for uuid in data.ids { - let cipher = match Cipher::find_by_uuid(&uuid, &mut conn).await { - Some(cipher) => cipher, - 
None => err!("Cipher doesn't exist"), + for cipher_id in data.ids { + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + err!("Cipher doesn't exist") }; - if !cipher.is_accessible_to_user(&user_uuid, &mut conn).await { + if !cipher.is_accessible_to_user(&user_id, &mut conn).await { err!("Cipher is not accessible by user") } // Move cipher - cipher.move_to_folder(data.folder_id.clone(), &user_uuid, &mut conn).await?; + cipher.move_to_folder(data.folder_id.clone(), &user_id, &mut conn).await?; nt.send_cipher_update( UpdateType::SyncCipherUpdate, &cipher, - &[user_uuid.clone()], + &[user_id.clone()], &headers.device.uuid, None, &mut conn, @@ -1594,14 +1603,14 @@ async fn move_cipher_selected_put( } #[derive(FromForm)] -struct OrganizationId { +struct OrganizationIdData { #[field(name = "organizationId")] - org_id: String, + org_id: OrganizationId, } #[post("/ciphers/purge?", data = "")] async fn delete_all( - organization: Option, + organization: Option, data: Json, headers: Headers, mut conn: DbConn, @@ -1615,10 +1624,10 @@ async fn delete_all( match organization { Some(org_data) => { // Organization ID in query params, purging organization vault - match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &mut conn).await { + match Membership::find_by_user_and_org(&user.uuid, &org_data.org_id, &mut conn).await { None => err!("You don't have permission to purge the organization vault"), - Some(user_org) => { - if user_org.atype == UserOrgType::Owner { + Some(member) => { + if member.atype == MembershipType::Owner { Cipher::delete_all_by_organization(&org_data.org_id, &mut conn).await?; nt.send_user_update(UpdateType::SyncVault, &user).await; @@ -1661,15 +1670,14 @@ async fn delete_all( } async fn _delete_cipher_by_uuid( - uuid: &str, + cipher_id: &CipherId, headers: &Headers, conn: &mut DbConn, soft_delete: bool, nt: &Notify<'_>, ) -> EmptyResult { - let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { - 
Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), + let Some(mut cipher) = Cipher::find_by_uuid(cipher_id, conn).await else { + err!("Cipher doesn't exist") }; if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { @@ -1701,13 +1709,13 @@ async fn _delete_cipher_by_uuid( .await; } - if let Some(org_uuid) = cipher.organization_uuid { + if let Some(org_id) = cipher.organization_uuid { let event_type = match soft_delete { true => EventType::CipherSoftDeleted as i32, false => EventType::CipherDeleted as i32, }; - log_event(event_type, &cipher.uuid, &org_uuid, &headers.user.uuid, headers.device.atype, &headers.ip.ip, conn) + log_event(event_type, &cipher.uuid, &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, conn) .await; } @@ -1717,7 +1725,7 @@ async fn _delete_cipher_by_uuid( #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct CipherIdsData { - ids: Vec, + ids: Vec, } async fn _delete_multiple_ciphers( @@ -1729,8 +1737,8 @@ async fn _delete_multiple_ciphers( ) -> EmptyResult { let data = data.into_inner(); - for uuid in data.ids { - if let error @ Err(_) = _delete_cipher_by_uuid(&uuid, &headers, &mut conn, soft_delete, &nt).await { + for cipher_id in data.ids { + if let error @ Err(_) = _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, soft_delete, &nt).await { return error; }; } @@ -1738,10 +1746,14 @@ async fn _delete_multiple_ciphers( Ok(()) } -async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbConn, nt: &Notify<'_>) -> JsonResult { - let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), +async fn _restore_cipher_by_uuid( + cipher_id: &CipherId, + headers: &Headers, + conn: &mut DbConn, + nt: &Notify<'_>, +) -> JsonResult { + let Some(mut cipher) = Cipher::find_by_uuid(cipher_id, conn).await else { + err!("Cipher doesn't exist") }; if !cipher.is_write_accessible_to_user(&headers.user.uuid, 
conn).await { @@ -1761,11 +1773,11 @@ async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbCon ) .await; - if let Some(org_uuid) = &cipher.organization_uuid { + if let Some(org_id) = &cipher.organization_uuid { log_event( EventType::CipherRestored as i32, &cipher.uuid.clone(), - org_uuid, + org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -1786,8 +1798,8 @@ async fn _restore_multiple_ciphers( let data = data.into_inner(); let mut ciphers: Vec = Vec::new(); - for uuid in data.ids { - match _restore_cipher_by_uuid(&uuid, headers, conn, nt).await { + for cipher_id in data.ids { + match _restore_cipher_by_uuid(&cipher_id, headers, conn, nt).await { Ok(json) => ciphers.push(json.into_inner()), err => return err, } @@ -1801,24 +1813,22 @@ async fn _restore_multiple_ciphers( } async fn _delete_cipher_attachment_by_id( - uuid: &str, - attachment_id: &str, + cipher_id: &CipherId, + attachment_id: &AttachmentId, headers: &Headers, conn: &mut DbConn, nt: &Notify<'_>, ) -> EmptyResult { - let attachment = match Attachment::find_by_id(attachment_id, conn).await { - Some(attachment) => attachment, - None => err!("Attachment doesn't exist"), + let Some(attachment) = Attachment::find_by_id(attachment_id, conn).await else { + err!("Attachment doesn't exist") }; - if attachment.cipher_uuid != uuid { + if &attachment.cipher_uuid != cipher_id { err!("Attachment from other cipher") } - let cipher = match Cipher::find_by_uuid(uuid, conn).await { - Some(cipher) => cipher, - None => err!("Cipher doesn't exist"), + let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else { + err!("Cipher doesn't exist") }; if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { @@ -1837,11 +1847,11 @@ async fn _delete_cipher_attachment_by_id( ) .await; - if let Some(org_uuid) = cipher.organization_uuid { + if let Some(org_id) = cipher.organization_uuid { log_event( EventType::CipherAttachmentDeleted as i32, &cipher.uuid, - &org_uuid, + 
&org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -1857,14 +1867,14 @@ async fn _delete_cipher_attachment_by_id( /// It will prevent the so called N+1 SQL issue by running just a few queries which will hold all the data needed. /// This will not improve the speed of a single cipher.to_json() call that much, so better not to use it for those calls. pub struct CipherSyncData { - pub cipher_attachments: HashMap>, - pub cipher_folders: HashMap, - pub cipher_favorites: HashSet, - pub cipher_collections: HashMap>, - pub user_organizations: HashMap, - pub user_collections: HashMap, - pub user_collections_groups: HashMap, - pub user_group_full_access_for_organizations: HashSet, + pub cipher_attachments: HashMap>, + pub cipher_folders: HashMap, + pub cipher_favorites: HashSet, + pub cipher_collections: HashMap>, + pub members: HashMap, + pub user_collections: HashMap, + pub user_collections_groups: HashMap, + pub user_group_full_access_for_organizations: HashSet, } #[derive(Eq, PartialEq)] @@ -1874,17 +1884,17 @@ pub enum CipherSyncType { } impl CipherSyncData { - pub async fn new(user_uuid: &str, sync_type: CipherSyncType, conn: &mut DbConn) -> Self { - let cipher_folders: HashMap; - let cipher_favorites: HashSet; + pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &mut DbConn) -> Self { + let cipher_folders: HashMap; + let cipher_favorites: HashSet; match sync_type { // User Sync supports Folders and Favorites CipherSyncType::User => { // Generate a HashMap with the Cipher UUID as key and the Folder UUID as value - cipher_folders = FolderCipher::find_by_user(user_uuid, conn).await.into_iter().collect(); + cipher_folders = FolderCipher::find_by_user(user_id, conn).await.into_iter().collect(); // Generate a HashSet of all the Cipher UUID's which are marked as favorite - cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_uuid, conn).await.into_iter().collect(); + cipher_favorites = 
Favorite::get_all_cipher_uuid_by_user(user_id, conn).await.into_iter().collect(); } // Organization Sync does not support Folders and Favorites. // If these are set, it will cause issues in the web-vault. @@ -1895,38 +1905,35 @@ impl CipherSyncData { } // Generate a list of Cipher UUID's containing a Vec with one or more Attachment records - let user_org_uuids = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await; - let attachments = Attachment::find_all_by_user_and_orgs(user_uuid, &user_org_uuids, conn).await; - let mut cipher_attachments: HashMap> = HashMap::with_capacity(attachments.len()); + let orgs = Membership::get_orgs_by_user(user_id, conn).await; + let attachments = Attachment::find_all_by_user_and_orgs(user_id, &orgs, conn).await; + let mut cipher_attachments: HashMap> = HashMap::with_capacity(attachments.len()); for attachment in attachments { cipher_attachments.entry(attachment.cipher_uuid.clone()).or_default().push(attachment); } // Generate a HashMap with the Cipher UUID as key and one or more Collection UUID's - let user_cipher_collections = Cipher::get_collections_with_cipher_by_user(user_uuid.to_string(), conn).await; - let mut cipher_collections: HashMap> = + let user_cipher_collections = Cipher::get_collections_with_cipher_by_user(user_id.clone(), conn).await; + let mut cipher_collections: HashMap> = HashMap::with_capacity(user_cipher_collections.len()); for (cipher, collection) in user_cipher_collections { cipher_collections.entry(cipher).or_default().push(collection); } - // Generate a HashMap with the Organization UUID as key and the UserOrganization record - let user_organizations: HashMap = UserOrganization::find_by_user(user_uuid, conn) - .await - .into_iter() - .map(|uo| (uo.org_uuid.clone(), uo)) - .collect(); + // Generate a HashMap with the Organization UUID as key and the Membership record + let members: HashMap = + Membership::find_by_user(user_id, conn).await.into_iter().map(|m| (m.org_uuid.clone(), m)).collect(); // 
Generate a HashMap with the User_Collections UUID as key and the CollectionUser record - let user_collections: HashMap = CollectionUser::find_by_user(user_uuid, conn) + let user_collections: HashMap = CollectionUser::find_by_user(user_id, conn) .await .into_iter() .map(|uc| (uc.collection_uuid.clone(), uc)) .collect(); // Generate a HashMap with the collections_uuid as key and the CollectionGroup record - let user_collections_groups: HashMap = if CONFIG.org_groups_enabled() { - CollectionGroup::find_by_user(user_uuid, conn) + let user_collections_groups: HashMap = if CONFIG.org_groups_enabled() { + CollectionGroup::find_by_user(user_id, conn) .await .into_iter() .map(|collection_group| (collection_group.collections_uuid.clone(), collection_group)) @@ -1935,9 +1942,9 @@ impl CipherSyncData { HashMap::new() }; - // Get all organizations that the user has full access to via group assignment - let user_group_full_access_for_organizations: HashSet = if CONFIG.org_groups_enabled() { - Group::gather_user_organizations_full_access(user_uuid, conn).await.into_iter().collect() + // Get all organizations that the given user has full access to via group assignment + let user_group_full_access_for_organizations: HashSet = if CONFIG.org_groups_enabled() { + Group::get_orgs_by_user_with_full_access(user_id, conn).await.into_iter().collect() } else { HashSet::new() }; @@ -1947,7 +1954,7 @@ impl CipherSyncData { cipher_folders, cipher_favorites, cipher_collections, - user_organizations, + members, user_collections, user_collections_groups, user_group_full_access_for_organizations, diff --git a/src/api/core/emergency_access.rs b/src/api/core/emergency_access.rs index 1c29b774..8c6fcb65 100644 --- a/src/api/core/emergency_access.rs +++ b/src/api/core/emergency_access.rs @@ -93,10 +93,10 @@ async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json { } #[get("/emergency-access/")] -async fn get_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { 
+async fn get_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; - match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await { + match EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await { Some(emergency_access) => Ok(Json( emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"), )), @@ -118,7 +118,7 @@ struct EmergencyAccessUpdateData { #[put("/emergency-access/", data = "")] async fn put_emergency_access( - emer_id: &str, + emer_id: EmergencyAccessId, data: Json, headers: Headers, conn: DbConn, @@ -128,7 +128,7 @@ async fn put_emergency_access( #[post("/emergency-access/", data = "")] async fn post_emergency_access( - emer_id: &str, + emer_id: EmergencyAccessId, data: Json, headers: Headers, mut conn: DbConn, @@ -137,11 +137,11 @@ async fn post_emergency_access( let data: EmergencyAccessUpdateData = data.into_inner(); - let mut emergency_access = - match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await { - Some(emergency_access) => emergency_access, - None => err!("Emergency access not valid."), - }; + let Some(mut emergency_access) = + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await + else { + err!("Emergency access not valid.") + }; let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) { Some(new_type) => new_type as i32, @@ -163,12 +163,12 @@ async fn post_emergency_access( // region delete #[delete("/emergency-access/")] -async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> EmptyResult { check_emergency_access_enabled()?; let emergency_access = match ( - 
EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await, - EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await, + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await, + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &mut conn).await, ) { (Some(grantor_emer), None) => { info!("Grantor deleted emergency access {emer_id}"); @@ -186,7 +186,7 @@ async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo } #[post("/emergency-access//delete")] -async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbConn) -> EmptyResult { +async fn post_delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> EmptyResult { delete_emergency_access(emer_id, headers, conn).await } @@ -266,8 +266,8 @@ async fn send_invite(data: Json, headers: Headers, mu if CONFIG.mail_enabled() { mail::send_emergency_access_invite( &new_emergency_access.email.expect("Grantee email does not exists"), - &grantee_user.uuid, - &new_emergency_access.uuid, + grantee_user.uuid, + new_emergency_access.uuid, &grantor_user.name, &grantor_user.email, ) @@ -281,27 +281,25 @@ async fn send_invite(data: Json, headers: Headers, mu } #[post("/emergency-access//reinvite")] -async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn resend_invite(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> EmptyResult { check_emergency_access_enabled()?; - let mut emergency_access = - match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await { - Some(emer) => emer, - None => err!("Emergency access not valid."), - }; + let Some(mut emergency_access) = + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await + else { + err!("Emergency access not valid.") + }; if 
emergency_access.status != EmergencyAccessStatus::Invited as i32 { err!("The grantee user is already accepted or confirmed to the organization"); } - let email = match emergency_access.email.clone() { - Some(email) => email, - None => err!("Email not valid."), + let Some(email) = emergency_access.email.clone() else { + err!("Email not valid.") }; - let grantee_user = match User::find_by_mail(&email, &mut conn).await { - Some(user) => user, - None => err!("Grantee user not found."), + let Some(grantee_user) = User::find_by_mail(&email, &mut conn).await else { + err!("Grantee user not found.") }; let grantor_user = headers.user; @@ -309,8 +307,8 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp if CONFIG.mail_enabled() { mail::send_emergency_access_invite( &email, - &grantor_user.uuid, - &emergency_access.uuid, + grantor_user.uuid, + emergency_access.uuid, &grantor_user.name, &grantor_user.email, ) @@ -333,7 +331,12 @@ struct AcceptData { } #[post("/emergency-access//accept", data = "")] -async fn accept_invite(emer_id: &str, data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn accept_invite( + emer_id: EmergencyAccessId, + data: Json, + headers: Headers, + mut conn: DbConn, +) -> EmptyResult { check_emergency_access_enabled()?; let data: AcceptData = data.into_inner(); @@ -356,16 +359,15 @@ async fn accept_invite(emer_id: &str, data: Json, headers: Headers, // We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database. // The uuid of the grantee gets stored once accepted. 
- let mut emergency_access = - match EmergencyAccess::find_by_uuid_and_grantee_email(emer_id, &headers.user.email, &mut conn).await { - Some(emer) => emer, - None => err!("Emergency access not valid."), - }; + let Some(mut emergency_access) = + EmergencyAccess::find_by_uuid_and_grantee_email(&emer_id, &headers.user.email, &mut conn).await + else { + err!("Emergency access not valid.") + }; // get grantor user to send Accepted email - let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { - Some(user) => user, - None => err!("Grantor user not found."), + let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else { + err!("Grantor user not found.") }; if emer_id == claims.emer_id @@ -392,7 +394,7 @@ struct ConfirmData { #[post("/emergency-access//confirm", data = "")] async fn confirm_emergency_access( - emer_id: &str, + emer_id: EmergencyAccessId, data: Json, headers: Headers, mut conn: DbConn, @@ -403,11 +405,11 @@ async fn confirm_emergency_access( let data: ConfirmData = data.into_inner(); let key = data.key; - let mut emergency_access = - match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &confirming_user.uuid, &mut conn).await { - Some(emer) => emer, - None => err!("Emergency access not valid."), - }; + let Some(mut emergency_access) = + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &confirming_user.uuid, &mut conn).await + else { + err!("Emergency access not valid.") + }; if emergency_access.status != EmergencyAccessStatus::Accepted as i32 || emergency_access.grantor_uuid != confirming_user.uuid @@ -415,15 +417,13 @@ async fn confirm_emergency_access( err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &mut conn).await { - Some(user) => user, - None => err!("Grantor user not found."), + let Some(grantor_user) = User::find_by_uuid(&confirming_user.uuid, &mut conn).await else { + err!("Grantor user not 
found.") }; if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { - let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await { - Some(user) => user, - None => err!("Grantee user not found."), + let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else { + err!("Grantee user not found.") }; emergency_access.status = EmergencyAccessStatus::Confirmed as i32; @@ -446,23 +446,22 @@ async fn confirm_emergency_access( // region access emergency access #[post("/emergency-access//initiate")] -async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn initiate_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; let initiating_user = headers.user; - let mut emergency_access = - match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &initiating_user.uuid, &mut conn).await { - Some(emer) => emer, - None => err!("Emergency access not valid."), - }; + let Some(mut emergency_access) = + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &initiating_user.uuid, &mut conn).await + else { + err!("Emergency access not valid.") + }; if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 { err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { - Some(user) => user, - None => err!("Grantor user not found."), + let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else { + err!("Grantor user not found.") }; let now = Utc::now().naive_utc(); @@ -485,28 +484,26 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db } #[post("/emergency-access//approve")] -async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn approve_emergency_access(emer_id: EmergencyAccessId, headers: Headers, 
mut conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; - let mut emergency_access = - match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await { - Some(emer) => emer, - None => err!("Emergency access not valid."), - }; + let Some(mut emergency_access) = + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await + else { + err!("Emergency access not valid.") + }; if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 { err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await { - Some(user) => user, - None => err!("Grantor user not found."), + let Some(grantor_user) = User::find_by_uuid(&headers.user.uuid, &mut conn).await else { + err!("Grantor user not found.") }; if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { - let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await { - Some(user) => user, - None => err!("Grantee user not found."), + let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else { + err!("Grantee user not found.") }; emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32; @@ -522,14 +519,14 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC } #[post("/emergency-access//reject")] -async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn reject_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; - let mut emergency_access = - match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await { - Some(emer) => emer, - None => err!("Emergency access not valid."), - }; + let Some(mut emergency_access) = + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await + else { 
+ err!("Emergency access not valid.") + }; if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 && emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32 @@ -538,9 +535,8 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo } if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { - let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await { - Some(user) => user, - None => err!("Grantee user not found."), + let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else { + err!("Grantee user not found.") }; emergency_access.status = EmergencyAccessStatus::Confirmed as i32; @@ -560,14 +556,14 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo // region action #[post("/emergency-access//view")] -async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; - let emergency_access = - match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await { - Some(emer) => emer, - None => err!("Emergency access not valid."), - }; + let Some(emergency_access) = + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &mut conn).await + else { + err!("Emergency access not valid.") + }; if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) { err!("Emergency access not valid.") @@ -598,23 +594,22 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn } #[post("/emergency-access//takeover")] -async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn takeover_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { 
check_emergency_access_enabled()?; let requesting_user = headers.user; - let emergency_access = - match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await { - Some(emer) => emer, - None => err!("Emergency access not valid."), - }; + let Some(emergency_access) = + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await + else { + err!("Emergency access not valid.") + }; if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) { err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { - Some(user) => user, - None => err!("Grantor user not found."), + let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else { + err!("Grantor user not found.") }; let result = json!({ @@ -638,7 +633,7 @@ struct EmergencyAccessPasswordData { #[post("/emergency-access//password", data = "")] async fn password_emergency_access( - emer_id: &str, + emer_id: EmergencyAccessId, data: Json, headers: Headers, mut conn: DbConn, @@ -650,19 +645,18 @@ async fn password_emergency_access( //let key = &data.Key; let requesting_user = headers.user; - let emergency_access = - match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await { - Some(emer) => emer, - None => err!("Emergency access not valid."), - }; + let Some(emergency_access) = + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await + else { + err!("Emergency access not valid.") + }; if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) { err!("Emergency access not valid.") } - let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { - Some(user) => user, - None => err!("Grantor user not found."), + let Some(mut grantor_user) = 
User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else { + err!("Grantor user not found.") }; // change grantor_user password @@ -673,9 +667,9 @@ async fn password_emergency_access( TwoFactor::delete_all_by_user(&grantor_user.uuid, &mut conn).await?; // Remove grantor from all organisations unless Owner - for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &mut conn).await { - if user_org.atype != UserOrgType::Owner as i32 { - user_org.delete(&mut conn).await?; + for member in Membership::find_any_state_by_user(&grantor_user.uuid, &mut conn).await { + if member.atype != MembershipType::Owner as i32 { + member.delete(&mut conn).await?; } } Ok(()) @@ -684,21 +678,20 @@ async fn password_emergency_access( // endregion #[get("/emergency-access//policies")] -async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn policies_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { let requesting_user = headers.user; - let emergency_access = - match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await { - Some(emer) => emer, - None => err!("Emergency access not valid."), - }; + let Some(emergency_access) = + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await + else { + err!("Emergency access not valid.") + }; if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) { err!("Emergency access not valid.") } - let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { - Some(user) => user, - None => err!("Grantor user not found."), + let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else { + err!("Grantor user not found.") }; let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &mut conn); @@ -713,11 +706,11 @@ async fn 
policies_emergency_access(emer_id: &str, headers: Headers, mut conn: Db fn is_valid_request( emergency_access: &EmergencyAccess, - requesting_user_uuid: &str, + requesting_user_id: &UserId, requested_access_type: EmergencyAccessType, ) -> bool { emergency_access.grantee_uuid.is_some() - && emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_uuid + && emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_id && emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32 && emergency_access.atype == requested_access_type as i32 } diff --git a/src/api/core/events.rs b/src/api/core/events.rs index 484094f5..3a7d41f0 100644 --- a/src/api/core/events.rs +++ b/src/api/core/events.rs @@ -8,7 +8,7 @@ use crate::{ api::{EmptyResult, JsonResult}, auth::{AdminHeaders, Headers}, db::{ - models::{Cipher, Event, UserOrganization}, + models::{Cipher, CipherId, Event, Membership, MembershipId, OrganizationId, UserId}, DbConn, DbPool, }, util::parse_date, @@ -31,7 +31,16 @@ struct EventRange { // Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41 #[get("/organizations//events?")] -async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { +async fn get_org_events( + org_id: OrganizationId, + data: EventRange, + headers: AdminHeaders, + mut conn: DbConn, +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + // Return an empty vec when we org events are disabled. 
// This prevents client errors let events_json: Vec = if !CONFIG.org_events_enabled() { @@ -44,7 +53,7 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, parse_date(&data.end) }; - Event::find_by_organization_uuid(org_id, &start_date, &end_date, &mut conn) + Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &mut conn) .await .iter() .map(|e| e.to_json()) @@ -59,14 +68,14 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, } #[get("/ciphers//events?")] -async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_cipher_events(cipher_id: CipherId, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult { // Return an empty vec when we org events are disabled. // This prevents client errors let events_json: Vec = if !CONFIG.org_events_enabled() { Vec::with_capacity(0) } else { let mut events_json = Vec::with_capacity(0); - if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, cipher_id, &mut conn).await { + if Membership::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &mut conn).await { let start_date = parse_date(&data.start); let end_date = if let Some(before_date) = &data.continuation_token { parse_date(before_date) @@ -74,7 +83,7 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, parse_date(&data.end) }; - events_json = Event::find_by_cipher_uuid(cipher_id, &start_date, &end_date, &mut conn) + events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &mut conn) .await .iter() .map(|e| e.to_json()) @@ -90,14 +99,17 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, }))) } -#[get("/organizations//users//events?")] +#[get("/organizations//users//events?")] async fn get_user_events( - org_id: &str, - user_org_id: &str, + org_id: OrganizationId, + member_id: MembershipId, data: EventRange, 
- _headers: AdminHeaders, + headers: AdminHeaders, mut conn: DbConn, ) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } // Return an empty vec when we org events are disabled. // This prevents client errors let events_json: Vec = if !CONFIG.org_events_enabled() { @@ -110,7 +122,7 @@ async fn get_user_events( parse_date(&data.end) }; - Event::find_by_org_and_user_org(org_id, user_org_id, &start_date, &end_date, &mut conn) + Event::find_by_org_and_member(&org_id, &member_id, &start_date, &end_date, &mut conn) .await .iter() .map(|e| e.to_json()) @@ -152,8 +164,8 @@ struct EventCollection { date: String, // Optional - cipher_id: Option, - organization_id: Option, + cipher_id: Option, + organization_id: Option, } // Upstream: @@ -180,11 +192,11 @@ async fn post_events_collect(data: Json>, headers: Headers, .await; } 1600..=1699 => { - if let Some(org_uuid) = &event.organization_id { + if let Some(org_id) = &event.organization_id { _log_event( event.r#type, - org_uuid, - org_uuid, + org_id, + org_id, &headers.user.uuid, headers.device.atype, Some(event_date), @@ -197,11 +209,11 @@ async fn post_events_collect(data: Json>, headers: Headers, _ => { if let Some(cipher_uuid) = &event.cipher_id { if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await { - if let Some(org_uuid) = cipher.organization_uuid { + if let Some(org_id) = cipher.organization_uuid { _log_event( event.r#type, cipher_uuid, - &org_uuid, + &org_id, &headers.user.uuid, headers.device.atype, Some(event_date), @@ -218,38 +230,39 @@ async fn post_events_collect(data: Json>, headers: Headers, Ok(()) } -pub async fn log_user_event(event_type: i32, user_uuid: &str, device_type: i32, ip: &IpAddr, conn: &mut DbConn) { +pub async fn log_user_event(event_type: i32, user_id: &UserId, device_type: i32, ip: &IpAddr, conn: &mut DbConn) { if !CONFIG.org_events_enabled() { return; } - _log_user_event(event_type, user_uuid, device_type, 
None, ip, conn).await; + _log_user_event(event_type, user_id, device_type, None, ip, conn).await; } async fn _log_user_event( event_type: i32, - user_uuid: &str, + user_id: &UserId, device_type: i32, event_date: Option, ip: &IpAddr, conn: &mut DbConn, ) { - let orgs = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await; - let mut events: Vec = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org + let memberships = Membership::find_by_user(user_id, conn).await; + let mut events: Vec = Vec::with_capacity(memberships.len() + 1); // We need an event per org and one without an org - // Upstream saves the event also without any org_uuid. + // Upstream saves the event also without any org_id. let mut event = Event::new(event_type, event_date); - event.user_uuid = Some(String::from(user_uuid)); - event.act_user_uuid = Some(String::from(user_uuid)); + event.user_uuid = Some(user_id.clone()); + event.act_user_uuid = Some(user_id.clone()); event.device_type = Some(device_type); event.ip_address = Some(ip.to_string()); events.push(event); // For each org a user is a member of store these events per org - for org_uuid in orgs { + for membership in memberships { let mut event = Event::new(event_type, event_date); - event.user_uuid = Some(String::from(user_uuid)); - event.org_uuid = Some(org_uuid); - event.act_user_uuid = Some(String::from(user_uuid)); + event.user_uuid = Some(user_id.clone()); + event.org_uuid = Some(membership.org_uuid); + event.org_user_uuid = Some(membership.uuid); + event.act_user_uuid = Some(user_id.clone()); event.device_type = Some(device_type); event.ip_address = Some(ip.to_string()); events.push(event); @@ -261,8 +274,8 @@ async fn _log_user_event( pub async fn log_event( event_type: i32, source_uuid: &str, - org_uuid: &str, - act_user_uuid: &str, + org_id: &OrganizationId, + act_user_id: &UserId, device_type: i32, ip: &IpAddr, conn: &mut DbConn, @@ -270,15 +283,15 @@ pub async fn log_event( if 
!CONFIG.org_events_enabled() { return; } - _log_event(event_type, source_uuid, org_uuid, act_user_uuid, device_type, None, ip, conn).await; + _log_event(event_type, source_uuid, org_id, act_user_id, device_type, None, ip, conn).await; } #[allow(clippy::too_many_arguments)] async fn _log_event( event_type: i32, source_uuid: &str, - org_uuid: &str, - act_user_uuid: &str, + org_id: &OrganizationId, + act_user_id: &UserId, device_type: i32, event_date: Option, ip: &IpAddr, @@ -290,31 +303,31 @@ async fn _log_event( // 1000..=1099 Are user events, they need to be logged via log_user_event() // Cipher Events 1100..=1199 => { - event.cipher_uuid = Some(String::from(source_uuid)); + event.cipher_uuid = Some(source_uuid.to_string().into()); } // Collection Events 1300..=1399 => { - event.collection_uuid = Some(String::from(source_uuid)); + event.collection_uuid = Some(source_uuid.to_string().into()); } // Group Events 1400..=1499 => { - event.group_uuid = Some(String::from(source_uuid)); + event.group_uuid = Some(source_uuid.to_string().into()); } // Org User Events 1500..=1599 => { - event.org_user_uuid = Some(String::from(source_uuid)); + event.org_user_uuid = Some(source_uuid.to_string().into()); } // 1600..=1699 Are organizational events, and they do not need the source_uuid // Policy Events 1700..=1799 => { - event.policy_uuid = Some(String::from(source_uuid)); + event.policy_uuid = Some(source_uuid.to_string().into()); } // Ignore others _ => {} } - event.org_uuid = Some(String::from(org_uuid)); - event.act_user_uuid = Some(String::from(act_user_uuid)); + event.org_uuid = Some(org_id.clone()); + event.act_user_uuid = Some(act_user_id.clone()); event.device_type = Some(device_type); event.ip_address = Some(ip.to_string()); event.save(conn).await.unwrap_or(()); diff --git a/src/api/core/folders.rs b/src/api/core/folders.rs index 9766d7a1..01dea4bb 100644 --- a/src/api/core/folders.rs +++ b/src/api/core/folders.rs @@ -23,25 +23,19 @@ async fn get_folders(headers: 
Headers, mut conn: DbConn) -> Json { })) } -#[get("/folders/")] -async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult { - let folder = match Folder::find_by_uuid(uuid, &mut conn).await { - Some(folder) => folder, - _ => err!("Invalid folder"), - }; - - if folder.user_uuid != headers.user.uuid { - err!("Folder belongs to another user") +#[get("/folders/")] +async fn get_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn) -> JsonResult { + match Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await { + Some(folder) => Ok(Json(folder.to_json())), + _ => err!("Invalid folder", "Folder does not exist or belongs to another user"), } - - Ok(Json(folder.to_json())) } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct FolderData { pub name: String, - pub id: Option, + pub id: Option, } #[post("/folders", data = "")] @@ -56,14 +50,20 @@ async fn post_folders(data: Json, headers: Headers, mut conn: DbConn Ok(Json(folder.to_json())) } -#[post("/folders/", data = "")] -async fn post_folder(uuid: &str, data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { - put_folder(uuid, data, headers, conn, nt).await +#[post("/folders/", data = "")] +async fn post_folder( + folder_id: FolderId, + data: Json, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { + put_folder(folder_id, data, headers, conn, nt).await } -#[put("/folders/", data = "")] +#[put("/folders/", data = "")] async fn put_folder( - uuid: &str, + folder_id: FolderId, data: Json, headers: Headers, mut conn: DbConn, @@ -71,15 +71,10 @@ async fn put_folder( ) -> JsonResult { let data: FolderData = data.into_inner(); - let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await { - Some(folder) => folder, - _ => err!("Invalid folder"), + let Some(mut folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await else { + err!("Invalid folder", "Folder does not exist or 
belongs to another user") }; - if folder.user_uuid != headers.user.uuid { - err!("Folder belongs to another user") - } - folder.name = data.name; folder.save(&mut conn).await?; @@ -88,22 +83,17 @@ async fn put_folder( Ok(Json(folder.to_json())) } -#[post("/folders//delete")] -async fn delete_folder_post(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { - delete_folder(uuid, headers, conn, nt).await +#[post("/folders//delete")] +async fn delete_folder_post(folder_id: FolderId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + delete_folder(folder_id, headers, conn, nt).await } -#[delete("/folders/")] -async fn delete_folder(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - let folder = match Folder::find_by_uuid(uuid, &mut conn).await { - Some(folder) => folder, - _ => err!("Invalid folder"), +#[delete("/folders/")] +async fn delete_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { + let Some(folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await else { + err!("Invalid folder", "Folder does not exist or belongs to another user") }; - if folder.user_uuid != headers.user.uuid { - err!("Folder belongs to another user") - } - // Delete the actual folder entry folder.delete(&mut conn).await?; diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs index 122bf44f..3aa9ad79 100644 --- a/src/api/core/mod.rs +++ b/src/api/core/mod.rs @@ -18,7 +18,7 @@ pub use sends::purge_sends; pub fn routes() -> Vec { let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains]; let mut hibp_routes = routes![hibp_breach]; - let mut meta_routes = routes![alive, now, version, config]; + let mut meta_routes = routes![alive, now, version, config, get_api_webauthn]; let mut routes = Vec::new(); routes.append(&mut accounts::routes()); @@ -184,6 +184,18 @@ fn version() -> Json<&'static str> { 
Json(crate::VERSION.unwrap_or_default()) } +#[get("/webauthn")] +fn get_api_webauthn(_headers: Headers) -> Json { + // Prevent a 404 error, which also causes key-rotation issues + // It looks like this is used when login with passkeys is enabled, which Vaultwarden does not (yet) support + // An empty list/data also works fine + Json(json!({ + "object": "list", + "data": [], + "continuationToken": null + })) +} + #[get("/config")] fn config() -> Json { let domain = crate::CONFIG.domain(); @@ -201,8 +213,8 @@ fn config() -> Json { // This means they expect a version that closely matches the Bitwarden server version // We should make sure that we keep this updated when we support the new server features // Version history: - // - Individual cipher key encryption: 2023.9.1 - "version": "2024.2.0", + // - Individual cipher key encryption: 2024.2.0 + "version": "2025.1.0", "gitHash": option_env!("GIT_REV"), "server": { "name": "Vaultwarden", diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs index 2b51a144..aabcc5e2 100644 --- a/src/api/core/organizations.rs +++ b/src/api/core/organizations.rs @@ -4,12 +4,16 @@ use rocket::Route; use serde_json::Value; use std::collections::{HashMap, HashSet}; +use crate::api::admin::FAKE_ADMIN_UUID; use crate::{ api::{ core::{log_event, two_factor, CipherSyncData, CipherSyncType}, EmptyResult, JsonResult, Notify, PasswordOrOtpData, UpdateType, }, - auth::{decode_invite, AdminHeaders, ClientVersion, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders}, + auth::{ + decode_invite, AdminHeaders, ClientVersion, Headers, ManagerHeaders, ManagerHeadersLoose, OrgMemberHeaders, + OwnerHeaders, + }, db::{models::*, DbConn}, mail, util::{convert_json_key_lcase_first, NumberOrString}, @@ -32,8 +36,9 @@ pub fn routes() -> Vec { put_organization, post_organization, post_organization_collections, - delete_organization_collection_user, - post_organization_collection_delete_user, + delete_organization_collection_member, + 
post_organization_collection_delete_member, + post_bulk_access_collections, post_organization_collection_update, put_organization_collection_update, delete_organization_collection, @@ -41,19 +46,20 @@ pub fn routes() -> Vec { bulk_delete_organization_collections, post_bulk_collections, get_org_details, - get_org_users, + get_members, send_invite, - reinvite_user, - bulk_reinvite_user, + reinvite_member, + bulk_reinvite_members, confirm_invite, bulk_confirm_invite, accept_invite, + get_org_user_mini_details, get_user, - edit_user, - put_organization_user, - delete_user, - bulk_delete_user, - post_delete_user, + edit_member, + put_member, + delete_member, + bulk_delete_member, + post_delete_member, post_org_import, list_policies, list_policies_token, @@ -68,15 +74,16 @@ pub fn routes() -> Vec { get_organization_keys, get_organization_public_key, bulk_public_keys, - deactivate_organization_user, - bulk_deactivate_organization_user, - revoke_organization_user, - bulk_revoke_organization_user, - activate_organization_user, - bulk_activate_organization_user, - restore_organization_user, - bulk_restore_organization_user, + deactivate_member, + bulk_deactivate_members, + revoke_member, + bulk_revoke_members, + activate_member, + bulk_activate_members, + restore_member, + bulk_restore_members, get_groups, + get_groups_details, post_groups, get_group, put_group, @@ -85,19 +92,20 @@ pub fn routes() -> Vec { delete_group, post_delete_group, bulk_delete_groups, - get_group_users, - put_group_users, + get_group_members, + put_group_members, get_user_groups, post_user_groups, put_user_groups, - delete_group_user, - post_delete_group_user, + delete_group_member, + post_delete_group_member, put_reset_password_enrollment, get_reset_password_details, put_reset_password, get_org_export, api_key, rotate_api_key, + get_billing_metadata, ] } @@ -122,20 +130,30 @@ struct OrganizationUpdateData { #[derive(Deserialize)] #[serde(rename_all = "camelCase")] -struct NewCollectionData { +struct 
FullCollectionData { name: String, - groups: Vec, - users: Vec, - id: Option, + groups: Vec, + users: Vec, + id: Option, external_id: Option, } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] -struct NewCollectionObjectData { +struct CollectionGroupData { hide_passwords: bool, - id: String, + id: GroupId, read_only: bool, + manage: bool, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct CollectionMembershipData { + hide_passwords: bool, + id: MembershipId, + read_only: bool, + manage: bool, } #[derive(Deserialize)] @@ -147,8 +165,14 @@ struct OrgKeyData { #[derive(Deserialize, Debug)] #[serde(rename_all = "camelCase")] -struct OrgBulkIds { - ids: Vec, +struct BulkGroupIds { + ids: Vec, +} + +#[derive(Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +struct BulkMembershipIds { + ids: Vec, } #[post("/organizations", data = "")] @@ -171,16 +195,16 @@ async fn create_organization(headers: Headers, data: Json, mut conn: Db }; let org = Organization::new(data.name, data.billing_email, private_key, public_key); - let mut user_org = UserOrganization::new(headers.user.uuid, org.uuid.clone()); + let mut member = Membership::new(headers.user.uuid, org.uuid.clone()); let collection = Collection::new(org.uuid.clone(), data.collection_name, None); - user_org.akey = data.key; - user_org.access_all = true; - user_org.atype = UserOrgType::Owner as i32; - user_org.status = UserOrgStatus::Confirmed as i32; + member.akey = data.key; + member.access_all = true; + member.atype = MembershipType::Owner as i32; + member.status = MembershipStatus::Confirmed as i32; org.save(&mut conn).await?; - user_org.save(&mut conn).await?; + member.save(&mut conn).await?; collection.save(&mut conn).await?; Ok(Json(org.to_json())) @@ -188,16 +212,19 @@ async fn create_organization(headers: Headers, data: Json, mut conn: Db #[delete("/organizations/", data = "")] async fn delete_organization( - org_id: &str, + org_id: OrganizationId, data: Json, headers: 
OwnerHeaders, mut conn: DbConn, ) -> EmptyResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } let data: PasswordOrOtpData = data.into_inner(); data.validate(&headers.user, true, &mut conn).await?; - match Organization::find_by_uuid(org_id, &mut conn).await { + match Organization::find_by_uuid(&org_id, &mut conn).await { None => err!("Organization not found"), Some(org) => org.delete(&mut conn).await, } @@ -205,7 +232,7 @@ async fn delete_organization( #[post("/organizations//delete", data = "")] async fn post_delete_organization( - org_id: &str, + org_id: OrganizationId, data: Json, headers: OwnerHeaders, conn: DbConn, @@ -214,20 +241,20 @@ async fn post_delete_organization( } #[post("/organizations//leave")] -async fn leave_organization(org_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult { - match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await { +async fn leave_organization(org_id: OrganizationId, headers: Headers, mut conn: DbConn) -> EmptyResult { + match Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await { None => err!("User not part of organization"), - Some(user_org) => { - if user_org.atype == UserOrgType::Owner - && UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, &mut conn).await <= 1 + Some(member) => { + if member.atype == MembershipType::Owner + && Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await <= 1 { err!("The last owner can't leave") } log_event( - EventType::OrganizationUserRemoved as i32, - &user_org.uuid, - org_id, + EventType::OrganizationUserLeft as i32, + &member.uuid, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -235,14 +262,17 @@ async fn leave_organization(org_id: &str, headers: Headers, mut conn: DbConn) -> ) .await; - user_org.delete(&mut conn).await + member.delete(&mut conn).await } } } 
#[get("/organizations/")] -async fn get_organization(org_id: &str, _headers: OwnerHeaders, mut conn: DbConn) -> JsonResult { - match Organization::find_by_uuid(org_id, &mut conn).await { +async fn get_organization(org_id: OrganizationId, headers: OwnerHeaders, mut conn: DbConn) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + match Organization::find_by_uuid(&org_id, &mut conn).await { Some(organization) => Ok(Json(organization.to_json())), None => err!("Can't find organization details"), } @@ -250,7 +280,7 @@ async fn get_organization(org_id: &str, _headers: OwnerHeaders, mut conn: DbConn #[put("/organizations/", data = "")] async fn put_organization( - org_id: &str, + org_id: OrganizationId, headers: OwnerHeaders, data: Json, conn: DbConn, @@ -260,27 +290,30 @@ async fn put_organization( #[post("/organizations/", data = "")] async fn post_organization( - org_id: &str, + org_id: OrganizationId, headers: OwnerHeaders, data: Json, mut conn: DbConn, ) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let data: OrganizationUpdateData = data.into_inner(); - let mut org = match Organization::find_by_uuid(org_id, &mut conn).await { - Some(organization) => organization, - None => err!("Can't find organization details"), + let Some(mut org) = Organization::find_by_uuid(&org_id, &mut conn).await else { + err!("Organization not found") }; org.name = data.name; - org.billing_email = data.billing_email; + org.billing_email = data.billing_email.to_lowercase(); org.save(&mut conn).await?; log_event( EventType::OrganizationUpdated as i32, - org_id, - org_id, + org_id.as_ref(), + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -306,43 +339,60 @@ async fn get_user_collections(headers: Headers, mut conn: DbConn) -> Json } #[get("/organizations//collections")] -async fn get_org_collections(org_id: &str, _headers: 
ManagerHeadersLoose, mut conn: DbConn) -> Json { - Json(json!({ - "data": _get_org_collections(org_id, &mut conn).await, +async fn get_org_collections(org_id: OrganizationId, headers: ManagerHeadersLoose, mut conn: DbConn) -> JsonResult { + if org_id != headers.membership.org_uuid { + err!("Organization not found", "Organization id's do not match"); + } + Ok(Json(json!({ + "data": _get_org_collections(&org_id, &mut conn).await, "object": "list", "continuationToken": null, - })) + }))) } #[get("/organizations//collections/details")] -async fn get_org_collections_details(org_id: &str, headers: ManagerHeadersLoose, mut conn: DbConn) -> JsonResult { +async fn get_org_collections_details( + org_id: OrganizationId, + headers: ManagerHeadersLoose, + mut conn: DbConn, +) -> JsonResult { + if org_id != headers.membership.org_uuid { + err!("Organization not found", "Organization id's do not match"); + } let mut data = Vec::new(); - let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await { - Some(u) => u, - None => err!("User is not part of organization"), + let Some(member) = Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await else { + err!("User is not part of organization") }; // get all collection memberships for the current organization - let coll_users = CollectionUser::find_by_organization(org_id, &mut conn).await; + let col_users = CollectionUser::find_by_organization_swap_user_uuid_with_member_uuid(&org_id, &mut conn).await; + // Generate a HashMap to get the correct MembershipType per user to determine the manage permission + // We use the uuid instead of the user_uuid here, since that is what is used in CollectionUser + let membership_type: HashMap = + Membership::find_confirmed_by_org(&org_id, &mut conn).await.into_iter().map(|m| (m.uuid, m.atype)).collect(); // check if current user has full access to the organization (either directly or via any group) - let has_full_access_to_org = 
user_org.access_all + let has_full_access_to_org = member.access_all || (CONFIG.org_groups_enabled() - && GroupUser::has_full_access_by_member(org_id, &user_org.uuid, &mut conn).await); + && GroupUser::has_full_access_by_member(&org_id, &member.uuid, &mut conn).await); - for col in Collection::find_by_organization(org_id, &mut conn).await { + for col in Collection::find_by_organization(&org_id, &mut conn).await { // check whether the current user has access to the given collection let assigned = has_full_access_to_org - || CollectionUser::has_access_to_collection_by_user(&col.uuid, &user_org.user_uuid, &mut conn).await + || CollectionUser::has_access_to_collection_by_user(&col.uuid, &member.user_uuid, &mut conn).await || (CONFIG.org_groups_enabled() - && GroupUser::has_access_to_collection_by_member(&col.uuid, &user_org.uuid, &mut conn).await); + && GroupUser::has_access_to_collection_by_member(&col.uuid, &member.uuid, &mut conn).await); // get the users assigned directly to the given collection - let users: Vec = coll_users + let users: Vec = col_users .iter() - .filter(|collection_user| collection_user.collection_uuid == col.uuid) - .map(|collection_user| SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()) + .filter(|collection_member| collection_member.collection_uuid == col.uuid) + .map(|collection_member| { + collection_member.to_json_details_for_member( + *membership_type.get(&collection_member.membership_uuid).unwrap_or(&(MembershipType::User as i32)), + ) + }) .collect(); // get the group details for the given collection @@ -350,9 +400,7 @@ async fn get_org_collections_details(org_id: &str, headers: ManagerHeadersLoose, CollectionGroup::find_by_collection(&col.uuid, &mut conn) .await .iter() - .map(|collection_group| { - SelectionReadOnly::to_collection_group_details_read_only(collection_group).to_json() - }) + .map(|collection_group| collection_group.to_json_details_for_group()) .collect() } else { Vec::with_capacity(0) @@ 
-374,22 +422,24 @@ async fn get_org_collections_details(org_id: &str, headers: ManagerHeadersLoose, }))) } -async fn _get_org_collections(org_id: &str, conn: &mut DbConn) -> Value { +async fn _get_org_collections(org_id: &OrganizationId, conn: &mut DbConn) -> Value { Collection::find_by_organization(org_id, conn).await.iter().map(Collection::to_json).collect::() } #[post("/organizations//collections", data = "")] async fn post_organization_collections( - org_id: &str, + org_id: OrganizationId, headers: ManagerHeadersLoose, - data: Json, + data: Json, mut conn: DbConn, ) -> JsonResult { - let data: NewCollectionData = data.into_inner(); + if org_id != headers.membership.org_uuid { + err!("Organization not found", "Organization id's do not match"); + } + let data: FullCollectionData = data.into_inner(); - let org = match Organization::find_by_uuid(org_id, &mut conn).await { - Some(organization) => organization, - None => err!("Can't find organization details"), + let Some(org) = Organization::find_by_uuid(&org_id, &mut conn).await else { + err!("Can't find organization details") }; let collection = Collection::new(org.uuid, data.name, data.external_id); @@ -398,7 +448,7 @@ async fn post_organization_collections( log_event( EventType::CollectionCreated as i32, &collection.uuid, - org_id, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -407,66 +457,144 @@ async fn post_organization_collections( .await; for group in data.groups { - CollectionGroup::new(collection.uuid.clone(), group.id, group.read_only, group.hide_passwords) + CollectionGroup::new(collection.uuid.clone(), group.id, group.read_only, group.hide_passwords, group.manage) .save(&mut conn) .await?; } for user in data.users { - let org_user = match UserOrganization::find_by_uuid(&user.id, &mut conn).await { - Some(u) => u, - None => err!("User is not part of organization"), + let Some(member) = Membership::find_by_uuid_and_org(&user.id, &org_id, &mut conn).await else { + err!("User is 
not part of organization") }; - if org_user.access_all { + if member.access_all { continue; } - CollectionUser::save(&org_user.user_uuid, &collection.uuid, user.read_only, user.hide_passwords, &mut conn) + CollectionUser::save( + &member.user_uuid, + &collection.uuid, + user.read_only, + user.hide_passwords, + user.manage, + &mut conn, + ) + .await?; + } + + if headers.membership.atype == MembershipType::Manager && !headers.membership.access_all { + CollectionUser::save(&headers.membership.user_uuid, &collection.uuid, false, false, false, &mut conn).await?; + } + + Ok(Json(collection.to_json_details(&headers.membership.user_uuid, None, &mut conn).await)) +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct BulkCollectionAccessData { + collection_ids: Vec, + groups: Vec, + users: Vec, +} + +#[post("/organizations//collections/bulk-access", data = "", rank = 1)] +async fn post_bulk_access_collections( + org_id: OrganizationId, + headers: ManagerHeadersLoose, + data: Json, + mut conn: DbConn, +) -> EmptyResult { + if org_id != headers.membership.org_uuid { + err!("Organization not found", "Organization id's do not match"); + } + let data: BulkCollectionAccessData = data.into_inner(); + + if Organization::find_by_uuid(&org_id, &mut conn).await.is_none() { + err!("Can't find organization details") + }; + + for col_id in data.collection_ids { + let Some(collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await else { + err!("Collection not found") + }; + + // update collection modification date + collection.save(&mut conn).await?; + + log_event( + EventType::CollectionUpdated as i32, + &collection.uuid, + &org_id, + &headers.user.uuid, + headers.device.atype, + &headers.ip.ip, + &mut conn, + ) + .await; + + CollectionGroup::delete_all_by_collection(&col_id, &mut conn).await?; + for group in &data.groups { + CollectionGroup::new(col_id.clone(), group.id.clone(), group.read_only, group.hide_passwords, group.manage) + .save(&mut 
conn) + .await?; + } + + CollectionUser::delete_all_by_collection(&col_id, &mut conn).await?; + for user in &data.users { + let Some(member) = Membership::find_by_uuid_and_org(&user.id, &org_id, &mut conn).await else { + err!("User is not part of organization") + }; + + if member.access_all { + continue; + } + + CollectionUser::save( + &member.user_uuid, + &col_id, + user.read_only, + user.hide_passwords, + user.manage, + &mut conn, + ) .await?; + } } - if headers.org_user.atype == UserOrgType::Manager && !headers.org_user.access_all { - CollectionUser::save(&headers.org_user.user_uuid, &collection.uuid, false, false, &mut conn).await?; - } - - Ok(Json(collection.to_json())) + Ok(()) } #[put("/organizations//collections/", data = "")] async fn put_organization_collection_update( - org_id: &str, - col_id: &str, + org_id: OrganizationId, + col_id: CollectionId, headers: ManagerHeaders, - data: Json, + data: Json, conn: DbConn, ) -> JsonResult { post_organization_collection_update(org_id, col_id, headers, data, conn).await } -#[post("/organizations//collections/", data = "")] +#[post("/organizations//collections/", data = "", rank = 2)] async fn post_organization_collection_update( - org_id: &str, - col_id: &str, + org_id: OrganizationId, + col_id: CollectionId, headers: ManagerHeaders, - data: Json, + data: Json, mut conn: DbConn, ) -> JsonResult { - let data: NewCollectionData = data.into_inner(); - - let org = match Organization::find_by_uuid(org_id, &mut conn).await { - Some(organization) => organization, - None => err!("Can't find organization details"), - }; - - let mut collection = match Collection::find_by_uuid(col_id, &mut conn).await { - Some(collection) => collection, - None => err!("Collection not found"), - }; - - if collection.org_uuid != org.uuid { - err!("Collection is not owned by organization"); + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); } + let data: FullCollectionData = data.into_inner(); + + 
if Organization::find_by_uuid(&org_id, &mut conn).await.is_none() { + err!("Can't find organization details") + }; + + let Some(mut collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await else { + err!("Collection not found") + }; collection.name = data.name; collection.external_id = match data.external_id { @@ -479,7 +607,7 @@ async fn post_organization_collection_update( log_event( EventType::CollectionUpdated as i32, &collection.uuid, - org_id, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -487,55 +615,51 @@ async fn post_organization_collection_update( ) .await; - CollectionGroup::delete_all_by_collection(col_id, &mut conn).await?; + CollectionGroup::delete_all_by_collection(&col_id, &mut conn).await?; for group in data.groups { - CollectionGroup::new(String::from(col_id), group.id, group.read_only, group.hide_passwords) + CollectionGroup::new(col_id.clone(), group.id, group.read_only, group.hide_passwords, group.manage) .save(&mut conn) .await?; } - CollectionUser::delete_all_by_collection(col_id, &mut conn).await?; + CollectionUser::delete_all_by_collection(&col_id, &mut conn).await?; for user in data.users { - let org_user = match UserOrganization::find_by_uuid(&user.id, &mut conn).await { - Some(u) => u, - None => err!("User is not part of organization"), + let Some(member) = Membership::find_by_uuid_and_org(&user.id, &org_id, &mut conn).await else { + err!("User is not part of organization") }; - if org_user.access_all { + if member.access_all { continue; } - CollectionUser::save(&org_user.user_uuid, col_id, user.read_only, user.hide_passwords, &mut conn).await?; + CollectionUser::save(&member.user_uuid, &col_id, user.read_only, user.hide_passwords, user.manage, &mut conn) + .await?; } Ok(Json(collection.to_json_details(&headers.user.uuid, None, &mut conn).await)) } -#[delete("/organizations//collections//user/")] -async fn delete_organization_collection_user( - org_id: &str, - col_id: &str, - 
org_user_id: &str, - _headers: AdminHeaders, +#[delete("/organizations//collections//user/")] +async fn delete_organization_collection_member( + org_id: OrganizationId, + col_id: CollectionId, + member_id: MembershipId, + headers: AdminHeaders, mut conn: DbConn, ) -> EmptyResult { - let collection = match Collection::find_by_uuid(col_id, &mut conn).await { - None => err!("Collection not found"), - Some(collection) => { - if collection.org_uuid == org_id { - collection - } else { - err!("Collection and Organization id do not match") - } - } + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let Some(collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await else { + err!("Collection not found", "Collection does not exist or does not belong to this organization") }; - match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await { + match Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await { None => err!("User not found in organization"), - Some(user_org) => { - match CollectionUser::find_by_collection_and_user(&collection.uuid, &user_org.user_uuid, &mut conn).await { + Some(member) => { + match CollectionUser::find_by_collection_and_user(&collection.uuid, &member.user_uuid, &mut conn).await { None => err!("User not assigned to collection"), Some(col_user) => col_user.delete(&mut conn).await, } @@ -543,53 +667,47 @@ async fn delete_organization_collection_user( } } -#[post("/organizations//collections//delete-user/")] -async fn post_organization_collection_delete_user( - org_id: &str, - col_id: &str, - org_user_id: &str, +#[post("/organizations//collections//delete-user/")] +async fn post_organization_collection_delete_member( + org_id: OrganizationId, + col_id: CollectionId, + member_id: MembershipId, headers: AdminHeaders, conn: DbConn, ) -> EmptyResult { - delete_organization_collection_user(org_id, col_id, org_user_id, headers, conn).await + 
delete_organization_collection_member(org_id, col_id, member_id, headers, conn).await } async fn _delete_organization_collection( - org_id: &str, - col_id: &str, + org_id: &OrganizationId, + col_id: &CollectionId, headers: &ManagerHeaders, conn: &mut DbConn, ) -> EmptyResult { - match Collection::find_by_uuid(col_id, conn).await { - None => err!("Collection not found"), - Some(collection) => { - if collection.org_uuid == org_id { - log_event( - EventType::CollectionDeleted as i32, - &collection.uuid, - org_id, - &headers.user.uuid, - headers.device.atype, - &headers.ip.ip, - conn, - ) - .await; - collection.delete(conn).await - } else { - err!("Collection and Organization id do not match") - } - } - } + let Some(collection) = Collection::find_by_uuid_and_org(col_id, org_id, conn).await else { + err!("Collection not found", "Collection does not exist or does not belong to this organization") + }; + log_event( + EventType::CollectionDeleted as i32, + &collection.uuid, + org_id, + &headers.user.uuid, + headers.device.atype, + &headers.ip.ip, + conn, + ) + .await; + collection.delete(conn).await } #[delete("/organizations//collections/")] async fn delete_organization_collection( - org_id: &str, - col_id: &str, + org_id: OrganizationId, + col_id: CollectionId, headers: ManagerHeaders, mut conn: DbConn, ) -> EmptyResult { - _delete_organization_collection(org_id, col_id, &headers, &mut conn).await + _delete_organization_collection(&org_id, &col_id, &headers, &mut conn).await } #[derive(Deserialize, Debug)] @@ -598,33 +716,35 @@ struct DeleteCollectionData { #[allow(dead_code)] id: String, #[allow(dead_code)] - org_id: String, + org_id: OrganizationId, } -#[post("/organizations//collections//delete", data = "<_data>")] +#[post("/organizations//collections//delete")] async fn post_organization_collection_delete( - org_id: &str, - col_id: &str, + org_id: OrganizationId, + col_id: CollectionId, headers: ManagerHeaders, - _data: Json, mut conn: DbConn, ) -> EmptyResult { - 
_delete_organization_collection(org_id, col_id, &headers, &mut conn).await + _delete_organization_collection(&org_id, &col_id, &headers, &mut conn).await } #[derive(Deserialize, Debug)] #[serde(rename_all = "camelCase")] struct BulkCollectionIds { - ids: Vec, + ids: Vec, } #[delete("/organizations//collections", data = "")] async fn bulk_delete_organization_collections( - org_id: &str, + org_id: OrganizationId, headers: ManagerHeadersLoose, data: Json, mut conn: DbConn, ) -> EmptyResult { + if org_id != headers.membership.org_uuid { + err!("Organization not found", "Organization id's do not match"); + } let data: BulkCollectionIds = data.into_inner(); let collections = data.ids; @@ -632,37 +752,37 @@ async fn bulk_delete_organization_collections( let headers = ManagerHeaders::from_loose(headers, &collections, &mut conn).await?; for col_id in collections { - _delete_organization_collection(org_id, &col_id, &headers, &mut conn).await? + _delete_organization_collection(&org_id, &col_id, &headers, &mut conn).await? 
} Ok(()) } -#[get("/organizations//collections//details")] +#[get("/organizations//collections//details")] async fn get_org_collection_detail( - org_id: &str, - coll_id: &str, + org_id: OrganizationId, + col_id: CollectionId, headers: ManagerHeaders, mut conn: DbConn, ) -> JsonResult { - match Collection::find_by_uuid_and_user(coll_id, headers.user.uuid.clone(), &mut conn).await { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + match Collection::find_by_uuid_and_user(&col_id, headers.user.uuid.clone(), &mut conn).await { None => err!("Collection not found"), Some(collection) => { if collection.org_uuid != org_id { err!("Collection is not owned by organization") } - let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await { - Some(u) => u, - None => err!("User is not part of organization"), + let Some(member) = Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await else { + err!("User is not part of organization") }; let groups: Vec = if CONFIG.org_groups_enabled() { CollectionGroup::find_by_collection(&collection.uuid, &mut conn) .await .iter() - .map(|collection_group| { - SelectionReadOnly::to_collection_group_details_read_only(collection_group).to_json() - }) + .map(|collection_group| collection_group.to_json_details_for_group()) .collect() } else { // The Bitwarden clients seem to call this API regardless of whether groups are enabled, @@ -670,16 +790,29 @@ async fn get_org_collection_detail( Vec::with_capacity(0) }; - let users: Vec = - CollectionUser::find_by_collection_swap_user_uuid_with_org_user_uuid(&collection.uuid, &mut conn) - .await - .iter() - .map(|collection_user| { - SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json() - }) - .collect(); + // Generate a HashMap to get the correct MembershipType per user to determine the manage permission + // We use the uuid instead of the user_uuid here, 
since that is what is used in CollectionUser + let membership_type: HashMap = Membership::find_confirmed_by_org(&org_id, &mut conn) + .await + .into_iter() + .map(|m| (m.uuid, m.atype)) + .collect(); - let assigned = Collection::can_access_collection(&user_org, &collection.uuid, &mut conn).await; + let users: Vec = CollectionUser::find_by_org_and_coll_swap_user_uuid_with_member_uuid( + &org_id, + &collection.uuid, + &mut conn, + ) + .await + .iter() + .map(|collection_member| { + collection_member.to_json_details_for_member( + *membership_type.get(&collection_member.membership_uuid).unwrap_or(&(MembershipType::User as i32)), + ) + }) + .collect(); + + let assigned = Collection::can_access_collection(&member, &collection.uuid, &mut conn).await; let mut json_object = collection.to_json_details(&headers.user.uuid, None, &mut conn).await; json_object["assigned"] = json!(assigned); @@ -692,55 +825,64 @@ async fn get_org_collection_detail( } } -#[get("/organizations//collections//users")] -async fn get_collection_users(org_id: &str, coll_id: &str, _headers: ManagerHeaders, mut conn: DbConn) -> JsonResult { +#[get("/organizations//collections//users")] +async fn get_collection_users( + org_id: OrganizationId, + col_id: CollectionId, + headers: ManagerHeaders, + mut conn: DbConn, +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } // Get org and collection, check that collection is from org - let collection = match Collection::find_by_uuid_and_org(coll_id, org_id, &mut conn).await { - None => err!("Collection not found in Organization"), - Some(collection) => collection, + let Some(collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await else { + err!("Collection not found in Organization") }; - let mut user_list = Vec::new(); + let mut member_list = Vec::new(); for col_user in CollectionUser::find_by_collection(&collection.uuid, &mut conn).await { - user_list.push( - 
UserOrganization::find_by_user_and_org(&col_user.user_uuid, org_id, &mut conn) + member_list.push( + Membership::find_by_user_and_org(&col_user.user_uuid, &org_id, &mut conn) .await .unwrap() .to_json_user_access_restrictions(&col_user), ); } - Ok(Json(json!(user_list))) + Ok(Json(json!(member_list))) } -#[put("/organizations//collections//users", data = "")] +#[put("/organizations//collections//users", data = "")] async fn put_collection_users( - org_id: &str, - coll_id: &str, - data: Json>, - _headers: ManagerHeaders, + org_id: OrganizationId, + col_id: CollectionId, + data: Json>, + headers: ManagerHeaders, mut conn: DbConn, ) -> EmptyResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } // Get org and collection, check that collection is from org - if Collection::find_by_uuid_and_org(coll_id, org_id, &mut conn).await.is_none() { + if Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await.is_none() { err!("Collection not found in Organization") } // Delete all the user-collections - CollectionUser::delete_all_by_collection(coll_id, &mut conn).await?; + CollectionUser::delete_all_by_collection(&col_id, &mut conn).await?; // And then add all the received ones (except if the user has access_all) for d in data.iter() { - let user = match UserOrganization::find_by_uuid(&d.id, &mut conn).await { - Some(u) => u, - None => err!("User is not part of organization"), + let Some(user) = Membership::find_by_uuid_and_org(&d.id, &org_id, &mut conn).await else { + err!("User is not part of organization") }; if user.access_all { continue; } - CollectionUser::save(&user.user_uuid, coll_id, d.read_only, d.hide_passwords, &mut conn).await?; + CollectionUser::save(&user.user_uuid, &col_id, d.read_only, d.hide_passwords, d.manage, &mut conn).await?; } Ok(()) @@ -749,16 +891,13 @@ async fn put_collection_users( #[derive(FromForm)] struct OrgIdData { #[field(name = "organizationId")] - organization_id: String, + 
organization_id: OrganizationId, } #[get("/ciphers/organization-details?")] -async fn get_org_details(data: OrgIdData, headers: Headers, mut conn: DbConn) -> JsonResult { - if UserOrganization::find_confirmed_by_user_and_org(&headers.user.uuid, &data.organization_id, &mut conn) - .await - .is_none() - { - err_code!("Resource not found.", rocket::http::Status::NotFound.code); +async fn get_org_details(data: OrgIdData, headers: OrgMemberHeaders, mut conn: DbConn) -> JsonResult { + if data.organization_id != headers.org_id { + err_code!("Resource not found.", "Organization id's do not match", rocket::http::Status::NotFound.code); } Ok(Json(json!({ @@ -768,14 +907,13 @@ async fn get_org_details(data: OrgIdData, headers: Headers, mut conn: DbConn) -> }))) } -async fn _get_org_details(org_id: &str, host: &str, user_uuid: &str, conn: &mut DbConn) -> Value { +async fn _get_org_details(org_id: &OrganizationId, host: &str, user_id: &UserId, conn: &mut DbConn) -> Value { let ciphers = Cipher::find_by_org(org_id, conn).await; - let cipher_sync_data = CipherSyncData::new(user_uuid, CipherSyncType::Organization, conn).await; + let cipher_sync_data = CipherSyncData::new(user_id, CipherSyncType::Organization, conn).await; let mut ciphers_json = Vec::with_capacity(ciphers.len()); for c in ciphers { - ciphers_json - .push(c.to_json(host, user_uuid, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await); + ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await); } json!(ciphers_json) } @@ -789,14 +927,17 @@ struct GetOrgUserData { } #[get("/organizations//users?")] -async fn get_org_users( +async fn get_members( data: GetOrgUserData, - org_id: &str, - _headers: ManagerHeadersLoose, + org_id: OrganizationId, + headers: ManagerHeadersLoose, mut conn: DbConn, -) -> Json { +) -> JsonResult { + if org_id != headers.membership.org_uuid { + err!("Organization not found", "Organization id's do not match"); + } let mut 
users_json = Vec::new(); - for u in UserOrganization::find_by_org(org_id, &mut conn).await { + for u in Membership::find_by_org(&org_id, &mut conn).await { users_json.push( u.to_json_user_details( data.include_collections.unwrap_or(false), @@ -807,18 +948,26 @@ async fn get_org_users( ); } - Json(json!({ + Ok(Json(json!({ "data": users_json, "object": "list", "continuationToken": null, - })) + }))) } #[post("/organizations//keys", data = "")] -async fn post_org_keys(org_id: &str, data: Json, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { +async fn post_org_keys( + org_id: OrganizationId, + data: Json, + headers: AdminHeaders, + mut conn: DbConn, +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } let data: OrgKeyData = data.into_inner(); - let mut org = match Organization::find_by_uuid(org_id, &mut conn).await { + let mut org = match Organization::find_by_uuid(&org_id, &mut conn).await { Some(organization) => { if organization.private_key.is_some() && organization.public_key.is_some() { err!("Organization Keys already exist") @@ -840,40 +989,58 @@ async fn post_org_keys(org_id: &str, data: Json, _headers: AdminHead }))) } -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct CollectionData { - id: String, - read_only: bool, - hide_passwords: bool, -} - #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct InviteData { emails: Vec, - groups: Vec, + groups: Vec, r#type: NumberOrString, collections: Option>, #[serde(default)] access_all: bool, + #[serde(default)] + permissions: HashMap, } #[post("/organizations//users/invite", data = "")] -async fn send_invite(org_id: &str, data: Json, headers: AdminHeaders, mut conn: DbConn) -> EmptyResult { - let data: InviteData = data.into_inner(); +async fn send_invite( + org_id: OrganizationId, + data: Json, + headers: AdminHeaders, + mut conn: DbConn, +) -> EmptyResult { + if org_id != headers.org_id { + err!("Organization 
not found", "Organization id's do not match"); + } + let mut data: InviteData = data.into_inner(); - let new_type = match UserOrgType::from_str(&data.r#type.into_string()) { + // HACK: We need the raw user-type to be sure custom role is selected to determine the access_all permission + // The from_str() will convert the custom role type into a manager role type + let raw_type = &data.r#type.into_string(); + // Membership::from_str will convert custom (4) to manager (3) + let new_type = match MembershipType::from_str(raw_type) { Some(new_type) => new_type as i32, None => err!("Invalid type"), }; - if new_type != UserOrgType::User && headers.org_user_type != UserOrgType::Owner { + if new_type != MembershipType::User && headers.membership_type != MembershipType::Owner { err!("Only Owners can invite Managers, Admins or Owners") } + // HACK: This converts the Custom role which has the `Manage all collections` box checked into an access_all flag + // Since the parent checkbox is not sent to the server we need to check and verify the child checkboxes + // If the box is not checked, the user will still be a manager, but not with the access_all permission + if raw_type.eq("4") + && data.permissions.get("editAnyCollection") == Some(&json!(true)) + && data.permissions.get("deleteAnyCollection") == Some(&json!(true)) + && data.permissions.get("createNewCollections") == Some(&json!(true)) + { + data.access_all = true; + } + + let mut user_created: bool = false; for email in data.emails.iter() { - let mut user_org_status = UserOrgStatus::Invited as i32; + let mut member_status = MembershipStatus::Invited as i32; let user = match User::find_by_mail(email, &mut conn).await { None => { if !CONFIG.invitations_allowed() { @@ -885,37 +1052,75 @@ async fn send_invite(org_id: &str, data: Json, headers: AdminHeaders } if !CONFIG.mail_enabled() { - let invitation = Invitation::new(email); - invitation.save(&mut conn).await?; + Invitation::new(email).save(&mut conn).await?; } - let mut 
user = User::new(email.clone()); - user.save(&mut conn).await?; - user + let mut new_user = User::new(email.clone()); + new_user.save(&mut conn).await?; + user_created = true; + new_user } Some(user) => { - if UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn).await.is_some() { + if Membership::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await.is_some() { err!(format!("User already in organization: {email}")) } else { // automatically accept existing users if mail is disabled if !CONFIG.mail_enabled() && !user.password_hash.is_empty() { - user_org_status = UserOrgStatus::Accepted as i32; + member_status = MembershipStatus::Accepted as i32; } user } } }; - let mut new_user = UserOrganization::new(user.uuid.clone(), String::from(org_id)); + let mut new_member = Membership::new(user.uuid.clone(), org_id.clone()); let access_all = data.access_all; - new_user.access_all = access_all; - new_user.atype = new_type; - new_user.status = user_org_status; + new_member.access_all = access_all; + new_member.atype = new_type; + new_member.status = member_status; + new_member.save(&mut conn).await?; + + if CONFIG.mail_enabled() { + let org_name = match Organization::find_by_uuid(&org_id, &mut conn).await { + Some(org) => org.name, + None => err!("Error looking up organization"), + }; + + if let Err(e) = mail::send_invite( + &user, + org_id.clone(), + new_member.uuid.clone(), + &org_name, + Some(headers.user.email.clone()), + ) + .await + { + // Upon error delete the user, invite and org member records when needed + if user_created { + user.delete(&mut conn).await?; + } else { + new_member.delete(&mut conn).await?; + } + + err!(format!("Error sending invite: {e:?} ")); + } + } + + log_event( + EventType::OrganizationUserInvited as i32, + &new_member.uuid, + &org_id, + &headers.user.uuid, + headers.device.atype, + &headers.ip.ip, + &mut conn, + ) + .await; // If no accessAll, add the collections received if !access_all { for col in 
data.collections.iter().flatten() { - match Collection::find_by_uuid_and_org(&col.id, org_id, &mut conn).await { + match Collection::find_by_uuid_and_org(&col.id, &org_id, &mut conn).await { None => err!("Collection not found in Organization"), Some(collection) => { CollectionUser::save( @@ -923,6 +1128,7 @@ async fn send_invite(org_id: &str, data: Json, headers: AdminHeaders &collection.uuid, col.read_only, col.hide_passwords, + col.manage, &mut conn, ) .await?; @@ -931,56 +1137,30 @@ async fn send_invite(org_id: &str, data: Json, headers: AdminHeaders } } - new_user.save(&mut conn).await?; - - for group in data.groups.iter() { - let mut group_entry = GroupUser::new(String::from(group), user.uuid.clone()); + for group_id in data.groups.iter() { + let mut group_entry = GroupUser::new(group_id.clone(), new_member.uuid.clone()); group_entry.save(&mut conn).await?; } - - log_event( - EventType::OrganizationUserInvited as i32, - &new_user.uuid, - org_id, - &headers.user.uuid, - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - - if CONFIG.mail_enabled() { - let org_name = match Organization::find_by_uuid(org_id, &mut conn).await { - Some(org) => org.name, - None => err!("Error looking up organization"), - }; - - mail::send_invite( - &user, - Some(String::from(org_id)), - Some(new_user.uuid), - &org_name, - Some(headers.user.email.clone()), - ) - .await?; - } } Ok(()) } #[post("/organizations//users/reinvite", data = "")] -async fn bulk_reinvite_user( - org_id: &str, - data: Json, +async fn bulk_reinvite_members( + org_id: OrganizationId, + data: Json, headers: AdminHeaders, mut conn: DbConn, -) -> Json { - let data: OrgBulkIds = data.into_inner(); +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let data: BulkMembershipIds = data.into_inner(); let mut bulk_response = Vec::new(); - for org_user_id in data.ids { - let err_msg = match _reinvite_user(org_id, &org_user_id, 
&headers.user.email, &mut conn).await { + for member_id in data.ids { + let err_msg = match _reinvite_member(&org_id, &member_id, &headers.user.email, &mut conn).await { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -988,37 +1168,45 @@ async fn bulk_reinvite_user( bulk_response.push(json!( { "object": "OrganizationBulkConfirmResponseModel", - "id": org_user_id, + "id": member_id, "error": err_msg } )) } - Json(json!({ + Ok(Json(json!({ "data": bulk_response, "object": "list", "continuationToken": null - })) + }))) } -#[post("/organizations//users//reinvite")] -async fn reinvite_user(org_id: &str, user_org: &str, headers: AdminHeaders, mut conn: DbConn) -> EmptyResult { - _reinvite_user(org_id, user_org, &headers.user.email, &mut conn).await +#[post("/organizations//users//reinvite")] +async fn reinvite_member( + org_id: OrganizationId, + member_id: MembershipId, + headers: AdminHeaders, + mut conn: DbConn, +) -> EmptyResult { + _reinvite_member(&org_id, &member_id, &headers.user.email, &mut conn).await } -async fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, conn: &mut DbConn) -> EmptyResult { - let user_org = match UserOrganization::find_by_uuid(user_org, conn).await { - Some(user_org) => user_org, - None => err!("The user hasn't been invited to the organization."), +async fn _reinvite_member( + org_id: &OrganizationId, + member_id: &MembershipId, + invited_by_email: &str, + conn: &mut DbConn, +) -> EmptyResult { + let Some(member) = Membership::find_by_uuid_and_org(member_id, org_id, conn).await else { + err!("The user hasn't been invited to the organization.") }; - if user_org.status != UserOrgStatus::Invited as i32 { + if member.status != MembershipStatus::Invited as i32 { err!("The user is already accepted or confirmed to the organization") } - let user = match User::find_by_uuid(&user_org.user_uuid, conn).await { - Some(user) => user, - None => err!("User not found."), + let Some(user) = 
User::find_by_uuid(&member.user_uuid, conn).await else { + err!("User not found.") }; if !CONFIG.invitations_allowed() && user.password_hash.is_empty() { @@ -1031,22 +1219,15 @@ async fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, co }; if CONFIG.mail_enabled() { - mail::send_invite( - &user, - Some(org_id.to_string()), - Some(user_org.uuid), - &org_name, - Some(invited_by_email.to_string()), - ) - .await?; + mail::send_invite(&user, org_id.clone(), member.uuid, &org_name, Some(invited_by_email.to_string())).await?; } else if user.password_hash.is_empty() { let invitation = Invitation::new(&user.email); invitation.save(conn).await?; } else { - let _ = Invitation::take(&user.email, conn).await; - let mut user_org = user_org; - user_org.status = UserOrgStatus::Accepted as i32; - user_org.save(conn).await?; + Invitation::take(&user.email, conn).await; + let mut member = member; + member.status = MembershipStatus::Accepted as i32; + member.save(conn).await?; } Ok(()) @@ -1059,74 +1240,86 @@ struct AcceptData { reset_password_key: Option, } -#[post("/organizations//users/<_org_user_id>/accept", data = "")] -async fn accept_invite(org_id: &str, _org_user_id: &str, data: Json, mut conn: DbConn) -> EmptyResult { - // The web-vault passes org_id and org_user_id in the URL, but we are just reading them from the JWT instead +#[post("/organizations//users//accept", data = "")] +async fn accept_invite( + org_id: OrganizationId, + member_id: MembershipId, + data: Json, + headers: Headers, + mut conn: DbConn, +) -> EmptyResult { + // The web-vault passes org_id and member_id in the URL, but we are just reading them from the JWT instead let data: AcceptData = data.into_inner(); let claims = decode_invite(&data.token)?; - match User::find_by_mail(&claims.email, &mut conn).await { - Some(user) => { - Invitation::take(&claims.email, &mut conn).await; + // Don't allow other users from accepting an invitation. 
+ if !claims.email.eq(&headers.user.email) { + err!("Invitation was issued to a different account", "Claim does not match user_id") + } - if let (Some(user_org), Some(org)) = (&claims.user_org_id, &claims.org_id) { - let mut user_org = match UserOrganization::find_by_uuid_and_org(user_org, org, &mut conn).await { - Some(user_org) => user_org, - None => err!("Error accepting the invitation"), - }; + // If a claim does not have a member_id or it does not match the one in from the URI, something is wrong. + if !claims.member_id.eq(&member_id) { + err!("Error accepting the invitation", "Claim does not match the member_id") + } - if user_org.status != UserOrgStatus::Invited as i32 { - err!("User already accepted the invitation") - } + let member = &claims.member_id; + let org = &claims.org_id; - let master_password_required = OrgPolicy::org_is_reset_password_auto_enroll(org, &mut conn).await; - if data.reset_password_key.is_none() && master_password_required { - err!("Reset password key is required, but not provided."); - } + Invitation::take(&claims.email, &mut conn).await; - // This check is also done at accept_invite(), _confirm_invite, _activate_user(), edit_user(), admin::update_user_org_type - // It returns different error messages per function. 
- if user_org.atype < UserOrgType::Admin { - match OrgPolicy::is_user_allowed(&user_org.user_uuid, org_id, false, &mut conn).await { - Ok(_) => {} - Err(OrgPolicyErr::TwoFactorMissing) => { - if CONFIG.email_2fa_auto_fallback() { - two_factor::email::activate_email_2fa(&user, &mut conn).await?; - } else { - err!("You cannot join this organization until you enable two-step login on your user account"); - } - } - Err(OrgPolicyErr::SingleOrgEnforced) => { - err!("You cannot join this organization because you are a member of an organization which forbids it"); - } + // skip invitation logic when we were invited via the /admin panel + if **member != FAKE_ADMIN_UUID { + let Some(mut member) = Membership::find_by_uuid_and_org(member, org, &mut conn).await else { + err!("Error accepting the invitation") + }; + + if member.status != MembershipStatus::Invited as i32 { + err!("User already accepted the invitation") + } + + let master_password_required = OrgPolicy::org_is_reset_password_auto_enroll(org, &mut conn).await; + if data.reset_password_key.is_none() && master_password_required { + err!("Reset password key is required, but not provided."); + } + + // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type + // It returns different error messages per function. 
+ if member.atype < MembershipType::Admin { + match OrgPolicy::is_user_allowed(&member.user_uuid, &org_id, false, &mut conn).await { + Ok(_) => {} + Err(OrgPolicyErr::TwoFactorMissing) => { + if CONFIG.email_2fa_auto_fallback() { + two_factor::email::activate_email_2fa(&headers.user, &mut conn).await?; + } else { + err!("You cannot join this organization until you enable two-step login on your user account"); } } - - user_org.status = UserOrgStatus::Accepted as i32; - - if master_password_required { - user_org.reset_password_key = data.reset_password_key; + Err(OrgPolicyErr::SingleOrgEnforced) => { + err!("You cannot join this organization because you are a member of an organization which forbids it"); } - - user_org.save(&mut conn).await?; } } - None => err!("Invited user not found"), + + member.status = MembershipStatus::Accepted as i32; + + if master_password_required { + member.reset_password_key = data.reset_password_key; + } + + member.save(&mut conn).await?; } if CONFIG.mail_enabled() { - let mut org_name = CONFIG.invitation_org_name(); - if let Some(org_id) = &claims.org_id { - org_name = match Organization::find_by_uuid(org_id, &mut conn).await { + if let Some(invited_by_email) = &claims.invited_by_email { + let org_name = match Organization::find_by_uuid(&claims.org_id, &mut conn).await { Some(org) => org.name, None => err!("Organization not found."), }; - }; - if let Some(invited_by_email) = &claims.invited_by_email { // User was invited to an organization, so they must be confirmed manually after acceptance mail::send_invite_accepted(&claims.email, invited_by_email, &org_name).await?; } else { // User was invited from /admin, so they are automatically confirmed + let org_name = CONFIG.invitation_org_name(); mail::send_invite_confirmed(&claims.email, &org_name).await?; } } @@ -1137,7 +1330,7 @@ async fn accept_invite(org_id: &str, _org_user_id: &str, data: Json, #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct ConfirmData { - id: Option, 
+ id: Option, key: Option, } @@ -1149,21 +1342,24 @@ struct BulkConfirmData { #[post("/organizations//users/confirm", data = "")] async fn bulk_confirm_invite( - org_id: &str, + org_id: OrganizationId, data: Json, headers: AdminHeaders, mut conn: DbConn, nt: Notify<'_>, -) -> Json { +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } let data = data.into_inner(); let mut bulk_response = Vec::new(); match data.keys { Some(keys) => { for invite in keys { - let org_user_id = invite.id.unwrap_or_default(); + let member_id = invite.id.unwrap(); let user_key = invite.key.unwrap_or_default(); - let err_msg = match _confirm_invite(org_id, &org_user_id, &user_key, &headers, &mut conn, &nt).await { + let err_msg = match _confirm_invite(&org_id, &member_id, &user_key, &headers, &mut conn, &nt).await { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -1171,7 +1367,7 @@ async fn bulk_confirm_invite( bulk_response.push(json!( { "object": "OrganizationBulkConfirmResponseModel", - "id": org_user_id, + "id": member_id, "error": err_msg } )); @@ -1180,17 +1376,17 @@ async fn bulk_confirm_invite( None => error!("No keys to confirm"), } - Json(json!({ + Ok(Json(json!({ "data": bulk_response, "object": "list", "continuationToken": null - })) + }))) } -#[post("/organizations//users//confirm", data = "")] +#[post("/organizations//users//confirm", data = "")] async fn confirm_invite( - org_id: &str, - org_user_id: &str, + org_id: OrganizationId, + member_id: MembershipId, data: Json, headers: AdminHeaders, mut conn: DbConn, @@ -1198,42 +1394,41 @@ async fn confirm_invite( ) -> EmptyResult { let data = data.into_inner(); let user_key = data.key.unwrap_or_default(); - _confirm_invite(org_id, org_user_id, &user_key, &headers, &mut conn, &nt).await + _confirm_invite(&org_id, &member_id, &user_key, &headers, &mut conn, &nt).await } async fn _confirm_invite( - org_id: &str, - org_user_id: &str, + org_id: 
&OrganizationId, + member_id: &MembershipId, key: &str, headers: &AdminHeaders, conn: &mut DbConn, nt: &Notify<'_>, ) -> EmptyResult { - if key.is_empty() || org_user_id.is_empty() { + if key.is_empty() || member_id.is_empty() { err!("Key or UserId is not set, unable to process request"); } - let mut user_to_confirm = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { - Some(user) => user, - None => err!("The specified user isn't a member of the organization"), + let Some(mut member_to_confirm) = Membership::find_by_uuid_and_org(member_id, org_id, conn).await else { + err!("The specified user isn't a member of the organization") }; - if user_to_confirm.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner { + if member_to_confirm.atype != MembershipType::User && headers.membership_type != MembershipType::Owner { err!("Only Owners can confirm Managers, Admins or Owners") } - if user_to_confirm.status != UserOrgStatus::Accepted as i32 { + if member_to_confirm.status != MembershipStatus::Accepted as i32 { err!("User in invalid state") } - // This check is also done at accept_invite(), _confirm_invite, _activate_user(), edit_user(), admin::update_user_org_type + // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type // It returns different error messages per function. 
- if user_to_confirm.atype < UserOrgType::Admin { - match OrgPolicy::is_user_allowed(&user_to_confirm.user_uuid, org_id, true, conn).await { + if member_to_confirm.atype < MembershipType::Admin { + match OrgPolicy::is_user_allowed(&member_to_confirm.user_uuid, org_id, true, conn).await { Ok(_) => {} Err(OrgPolicyErr::TwoFactorMissing) => { if CONFIG.email_2fa_auto_fallback() { - two_factor::email::find_and_activate_email_2fa(&user_to_confirm.user_uuid, conn).await?; + two_factor::email::find_and_activate_email_2fa(&member_to_confirm.user_uuid, conn).await?; } else { err!("You cannot confirm this user because they have not setup 2FA"); } @@ -1244,12 +1439,12 @@ async fn _confirm_invite( } } - user_to_confirm.status = UserOrgStatus::Confirmed as i32; - user_to_confirm.akey = key.to_string(); + member_to_confirm.status = MembershipStatus::Confirmed as i32; + member_to_confirm.akey = key.to_string(); log_event( EventType::OrganizationUserConfirmed as i32, - &user_to_confirm.uuid, + &member_to_confirm.uuid, org_id, &headers.user.uuid, headers.device.atype, @@ -1263,33 +1458,56 @@ async fn _confirm_invite( Some(org) => org.name, None => err!("Error looking up organization."), }; - let address = match User::find_by_uuid(&user_to_confirm.user_uuid, conn).await { + let address = match User::find_by_uuid(&member_to_confirm.user_uuid, conn).await { Some(user) => user.email, None => err!("Error looking up user."), }; mail::send_invite_confirmed(&address, &org_name).await?; } - let save_result = user_to_confirm.save(conn).await; + let save_result = member_to_confirm.save(conn).await; - if let Some(user) = User::find_by_uuid(&user_to_confirm.user_uuid, conn).await { + if let Some(user) = User::find_by_uuid(&member_to_confirm.user_uuid, conn).await { nt.send_user_update(UpdateType::SyncOrgKeys, &user).await; } save_result } -#[get("/organizations//users/?")] -async fn get_user( - org_id: &str, - org_user_id: &str, - data: GetOrgUserData, - _headers: AdminHeaders, 
+#[get("/organizations//users/mini-details", rank = 1)] +async fn get_org_user_mini_details( + org_id: OrganizationId, + headers: ManagerHeadersLoose, mut conn: DbConn, ) -> JsonResult { - let user = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await { - Some(user) => user, - None => err!("The specified user isn't a member of the organization"), + if org_id != headers.membership.org_uuid { + err!("Organization not found", "Organization id's do not match"); + } + let mut members_json = Vec::new(); + for m in Membership::find_by_org(&org_id, &mut conn).await { + members_json.push(m.to_json_mini_details(&mut conn).await); + } + + Ok(Json(json!({ + "data": members_json, + "object": "list", + "continuationToken": null, + }))) +} + +#[get("/organizations//users/?", rank = 2)] +async fn get_user( + org_id: OrganizationId, + member_id: MembershipId, + data: GetOrgUserData, + headers: AdminHeaders, + mut conn: DbConn, +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let Some(user) = Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await else { + err!("The specified user isn't a member of the organization") }; // In this case, when groups are requested we also need to include collections. 
@@ -1305,71 +1523,90 @@ async fn get_user( struct EditUserData { r#type: NumberOrString, collections: Option>, - groups: Option>, + groups: Option>, #[serde(default)] access_all: bool, + #[serde(default)] + permissions: HashMap, } -#[put("/organizations//users/", data = "", rank = 1)] -async fn put_organization_user( - org_id: &str, - org_user_id: &str, +#[put("/organizations//users/", data = "", rank = 1)] +async fn put_member( + org_id: OrganizationId, + member_id: MembershipId, data: Json, headers: AdminHeaders, conn: DbConn, ) -> EmptyResult { - edit_user(org_id, org_user_id, data, headers, conn).await + edit_member(org_id, member_id, data, headers, conn).await } -#[post("/organizations//users/", data = "", rank = 1)] -async fn edit_user( - org_id: &str, - org_user_id: &str, +#[post("/organizations//users/", data = "", rank = 1)] +async fn edit_member( + org_id: OrganizationId, + member_id: MembershipId, data: Json, headers: AdminHeaders, mut conn: DbConn, ) -> EmptyResult { - let data: EditUserData = data.into_inner(); + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let mut data: EditUserData = data.into_inner(); - let new_type = match UserOrgType::from_str(&data.r#type.into_string()) { - Some(new_type) => new_type, - None => err!("Invalid type"), + // HACK: We need the raw user-type to be sure custom role is selected to determine the access_all permission + // The from_str() will convert the custom role type into a manager role type + let raw_type = &data.r#type.into_string(); + // MembershipTyp::from_str will convert custom (4) to manager (3) + let Some(new_type) = MembershipType::from_str(raw_type) else { + err!("Invalid type") }; - let mut user_to_edit = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await { - Some(user) => user, + // HACK: This converts the Custom role which has the `Manage all collections` box checked into an access_all flag + // Since the parent 
checkbox is not sent to the server we need to check and verify the child checkboxes + // If the box is not checked, the user will still be a manager, but not with the access_all permission + if raw_type.eq("4") + && data.permissions.get("editAnyCollection") == Some(&json!(true)) + && data.permissions.get("deleteAnyCollection") == Some(&json!(true)) + && data.permissions.get("createNewCollections") == Some(&json!(true)) + { + data.access_all = true; + } + + let mut member_to_edit = match Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await { + Some(member) => member, None => err!("The specified user isn't member of the organization"), }; - if new_type != user_to_edit.atype - && (user_to_edit.atype >= UserOrgType::Admin || new_type >= UserOrgType::Admin) - && headers.org_user_type != UserOrgType::Owner + if new_type != member_to_edit.atype + && (member_to_edit.atype >= MembershipType::Admin || new_type >= MembershipType::Admin) + && headers.membership_type != MembershipType::Owner { err!("Only Owners can grant and remove Admin or Owner privileges") } - if user_to_edit.atype == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner { + if member_to_edit.atype == MembershipType::Owner && headers.membership_type != MembershipType::Owner { err!("Only Owners can edit Owner users") } - if user_to_edit.atype == UserOrgType::Owner - && new_type != UserOrgType::Owner - && user_to_edit.status == UserOrgStatus::Confirmed as i32 + if member_to_edit.atype == MembershipType::Owner + && new_type != MembershipType::Owner + && member_to_edit.status == MembershipStatus::Confirmed as i32 { // Removing owner permission, check that there is at least one other confirmed owner - if UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, &mut conn).await <= 1 { + if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await <= 1 { err!("Can't delete the last owner") } } - // This check is also done at 
accept_invite(), _confirm_invite, _activate_user(), edit_user(), admin::update_user_org_type + // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type // It returns different error messages per function. - if new_type < UserOrgType::Admin { - match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, org_id, true, &mut conn).await { + if new_type < MembershipType::Admin { + match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &org_id, true, &mut conn).await { Ok(_) => {} Err(OrgPolicyErr::TwoFactorMissing) => { if CONFIG.email_2fa_auto_fallback() { - two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?; + two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &mut conn).await?; } else { err!("You cannot modify this user to this type because they have not setup 2FA"); } @@ -1380,25 +1617,26 @@ async fn edit_user( } } - user_to_edit.access_all = data.access_all; - user_to_edit.atype = new_type as i32; + member_to_edit.access_all = data.access_all; + member_to_edit.atype = new_type as i32; // Delete all the odd collections - for c in CollectionUser::find_by_organization_and_user_uuid(org_id, &user_to_edit.user_uuid, &mut conn).await { + for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &member_to_edit.user_uuid, &mut conn).await { c.delete(&mut conn).await?; } // If no accessAll, add the collections received if !data.access_all { for col in data.collections.iter().flatten() { - match Collection::find_by_uuid_and_org(&col.id, org_id, &mut conn).await { + match Collection::find_by_uuid_and_org(&col.id, &org_id, &mut conn).await { None => err!("Collection not found in Organization"), Some(collection) => { CollectionUser::save( - &user_to_edit.user_uuid, + &member_to_edit.user_uuid, &collection.uuid, col.read_only, col.hide_passwords, + col.manage, &mut conn, ) .await?; @@ -1407,17 +1645,17 @@ async fn edit_user( } } - 
GroupUser::delete_all_by_user(&user_to_edit.uuid, &mut conn).await?; + GroupUser::delete_all_by_member(&member_to_edit.uuid, &mut conn).await?; - for group in data.groups.iter().flatten() { - let mut group_entry = GroupUser::new(String::from(group), user_to_edit.uuid.clone()); + for group_id in data.groups.iter().flatten() { + let mut group_entry = GroupUser::new(group_id.clone(), member_to_edit.uuid.clone()); group_entry.save(&mut conn).await?; } log_event( EventType::OrganizationUserUpdated as i32, - &user_to_edit.uuid, - org_id, + &member_to_edit.uuid, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -1425,22 +1663,25 @@ async fn edit_user( ) .await; - user_to_edit.save(&mut conn).await + member_to_edit.save(&mut conn).await } #[delete("/organizations//users", data = "")] -async fn bulk_delete_user( - org_id: &str, - data: Json, +async fn bulk_delete_member( + org_id: OrganizationId, + data: Json, headers: AdminHeaders, mut conn: DbConn, nt: Notify<'_>, -) -> Json { - let data: OrgBulkIds = data.into_inner(); +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let data: BulkMembershipIds = data.into_inner(); let mut bulk_response = Vec::new(); - for org_user_id in data.ids { - let err_msg = match _delete_user(org_id, &org_user_id, &headers, &mut conn, &nt).await { + for member_id in data.ids { + let err_msg = match _delete_member(&org_id, &member_id, &headers, &mut conn, &nt).await { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -1448,67 +1689,67 @@ async fn bulk_delete_user( bulk_response.push(json!( { "object": "OrganizationBulkConfirmResponseModel", - "id": org_user_id, + "id": member_id, "error": err_msg } )) } - Json(json!({ + Ok(Json(json!({ "data": bulk_response, "object": "list", "continuationToken": null - })) + }))) } -#[delete("/organizations//users/")] -async fn delete_user( - org_id: &str, - org_user_id: &str, 
+#[delete("/organizations//users/")] +async fn delete_member( + org_id: OrganizationId, + member_id: MembershipId, headers: AdminHeaders, mut conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - _delete_user(org_id, org_user_id, &headers, &mut conn, &nt).await + _delete_member(&org_id, &member_id, &headers, &mut conn, &nt).await } -#[post("/organizations//users//delete")] -async fn post_delete_user( - org_id: &str, - org_user_id: &str, +#[post("/organizations//users//delete")] +async fn post_delete_member( + org_id: OrganizationId, + member_id: MembershipId, headers: AdminHeaders, mut conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - _delete_user(org_id, org_user_id, &headers, &mut conn, &nt).await + _delete_member(&org_id, &member_id, &headers, &mut conn, &nt).await } -async fn _delete_user( - org_id: &str, - org_user_id: &str, +async fn _delete_member( + org_id: &OrganizationId, + member_id: &MembershipId, headers: &AdminHeaders, conn: &mut DbConn, nt: &Notify<'_>, ) -> EmptyResult { - let user_to_delete = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { - Some(user) => user, - None => err!("User to delete isn't member of the organization"), + let Some(member_to_delete) = Membership::find_by_uuid_and_org(member_id, org_id, conn).await else { + err!("User to delete isn't member of the organization") }; - if user_to_delete.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner { + if member_to_delete.atype != MembershipType::User && headers.membership_type != MembershipType::Owner { err!("Only Owners can delete Admins or Owners") } - if user_to_delete.atype == UserOrgType::Owner && user_to_delete.status == UserOrgStatus::Confirmed as i32 { + if member_to_delete.atype == MembershipType::Owner && member_to_delete.status == MembershipStatus::Confirmed as i32 + { // Removing owner, check that there is at least one other confirmed owner - if UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, 
conn).await <= 1 { + if Membership::count_confirmed_by_org_and_type(org_id, MembershipType::Owner, conn).await <= 1 { err!("Can't delete the last owner") } } log_event( EventType::OrganizationUserRemoved as i32, - &user_to_delete.uuid, + &member_to_delete.uuid, org_id, &headers.user.uuid, headers.device.atype, @@ -1517,48 +1758,51 @@ async fn _delete_user( ) .await; - if let Some(user) = User::find_by_uuid(&user_to_delete.user_uuid, conn).await { + if let Some(user) = User::find_by_uuid(&member_to_delete.user_uuid, conn).await { nt.send_user_update(UpdateType::SyncOrgKeys, &user).await; } - user_to_delete.delete(conn).await + member_to_delete.delete(conn).await } #[post("/organizations//users/public-keys", data = "")] async fn bulk_public_keys( - org_id: &str, - data: Json, - _headers: AdminHeaders, + org_id: OrganizationId, + data: Json, + headers: AdminHeaders, mut conn: DbConn, -) -> Json { - let data: OrgBulkIds = data.into_inner(); +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let data: BulkMembershipIds = data.into_inner(); let mut bulk_response = Vec::new(); - // Check all received UserOrg UUID's and find the matching User to retrieve the public-key. - // If the user does not exists, just ignore it, and do not return any information regarding that UserOrg UUID. + // Check all received Membership UUID's and find the matching User to retrieve the public-key. + // If the user does not exists, just ignore it, and do not return any information regarding that Membership UUID. // The web-vault will then ignore that user for the following steps. 
- for user_org_id in data.ids { - match UserOrganization::find_by_uuid_and_org(&user_org_id, org_id, &mut conn).await { - Some(user_org) => match User::find_by_uuid(&user_org.user_uuid, &mut conn).await { + for member_id in data.ids { + match Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await { + Some(member) => match User::find_by_uuid(&member.user_uuid, &mut conn).await { Some(user) => bulk_response.push(json!( { "object": "organizationUserPublicKeyResponseModel", - "id": user_org_id, + "id": member_id, "userId": user.uuid, "key": user.public_key } )), None => debug!("User doesn't exist"), }, - None => debug!("UserOrg doesn't exist"), + None => debug!("Membership doesn't exist"), } } - Json(json!({ + Ok(Json(json!({ "data": bulk_response, "object": "list", "continuationToken": null - })) + }))) } use super::ciphers::update_cipher_from_data; @@ -1568,7 +1812,7 @@ use super::ciphers::CipherData; #[serde(rename_all = "camelCase")] struct ImportData { ciphers: Vec, - collections: Vec, + collections: Vec, collection_relationships: Vec, } @@ -1598,14 +1842,14 @@ async fn post_org_import( // TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks. 
Cipher::validate_cipher_data(&data.ciphers)?; - let existing_collections: HashSet> = - Collection::find_by_organization(&org_id, &mut conn).await.into_iter().map(|c| (Some(c.uuid))).collect(); - let mut collections: Vec = Vec::with_capacity(data.collections.len()); - for coll in data.collections { - let collection_uuid = if existing_collections.contains(&coll.id) { - coll.id.unwrap() + let existing_collections: HashSet> = + Collection::find_by_organization(&org_id, &mut conn).await.into_iter().map(|c| Some(c.uuid)).collect(); + let mut collections: Vec = Vec::with_capacity(data.collections.len()); + for col in data.collections { + let collection_uuid = if existing_collections.contains(&col.id) { + col.id.unwrap() } else { - let new_collection = Collection::new(org_id.clone(), coll.name, coll.external_id); + let new_collection = Collection::new(org_id.clone(), col.name, col.external_id); new_collection.save(&mut conn).await?; new_collection.uuid }; @@ -1622,7 +1866,7 @@ async fn post_org_import( let headers: Headers = headers.into(); - let mut ciphers: Vec = Vec::with_capacity(data.ciphers.len()); + let mut ciphers: Vec = Vec::with_capacity(data.ciphers.len()); for mut cipher_data in data.ciphers { // Always clear folder_id's via an organization import cipher_data.folder_id = None; @@ -1632,10 +1876,10 @@ async fn post_org_import( } // Assign the collections - for (cipher_index, coll_index) in relations { + for (cipher_index, col_index) in relations { let cipher_id = &ciphers[cipher_index]; - let coll_id = &collections[coll_index]; - CollectionCipher::save(cipher_id, coll_id, &mut conn).await?; + let col_id = &collections[col_index]; + CollectionCipher::save(cipher_id, col_id, &mut conn).await?; } let mut user = headers.user; @@ -1646,13 +1890,13 @@ async fn post_org_import( #[serde(rename_all = "camelCase")] #[allow(dead_code)] struct BulkCollectionsData { - organization_id: String, - cipher_ids: Vec, - collection_ids: HashSet, + organization_id: OrganizationId, + 
cipher_ids: Vec, + collection_ids: HashSet, remove_collections: bool, } -// This endpoint is only reachable via the organization view, therefor this endpoint is located here +// This endpoint is only reachable via the organization view, therefore this endpoint is located here // Also Bitwarden does not send out Notifications for these changes, it only does this for individual cipher collection updates #[post("/ciphers/bulk-collections", data = "")] async fn post_bulk_collections(data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { @@ -1666,7 +1910,7 @@ async fn post_bulk_collections(data: Json, headers: Headers // Get all the collection available to the user in one query // Also filter based upon the provided collections - let user_collections: HashMap = + let user_collections: HashMap = Collection::find_by_organization_and_user_uuid(&data.organization_id, &headers.user.uuid, &mut conn) .await .into_iter() @@ -1703,39 +1947,35 @@ async fn post_bulk_collections(data: Json, headers: Headers } #[get("/organizations//policies")] -async fn list_policies(org_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> Json { - let policies = OrgPolicy::find_by_org(org_id, &mut conn).await; +async fn list_policies(org_id: OrganizationId, headers: AdminHeaders, mut conn: DbConn) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let policies = OrgPolicy::find_by_org(&org_id, &mut conn).await; let policies_json: Vec = policies.iter().map(OrgPolicy::to_json).collect(); - Json(json!({ + Ok(Json(json!({ "data": policies_json, "object": "list", "continuationToken": null - })) + }))) } #[get("/organizations//policies/token?")] -async fn list_policies_token(org_id: &str, token: &str, mut conn: DbConn) -> JsonResult { - // web-vault 2024.6.2 seems to send these values and cause logs to output errors - // Catch this and prevent errors in the logs - // TODO: CleanUp after 2024.6.x is not used anymore. 
- if org_id == "undefined" && token == "undefined" { - return Ok(Json(json!({}))); - } - +async fn list_policies_token(org_id: OrganizationId, token: &str, mut conn: DbConn) -> JsonResult { let invite = decode_invite(token)?; - let invite_org_id = match invite.org_id { - Some(invite_org_id) => invite_org_id, - None => err!("Invalid token"), - }; - - if invite_org_id != org_id { + if invite.org_id != org_id { err!("Token doesn't match request organization"); } + // exit early when we have been invited via /admin panel + if org_id.as_ref() == FAKE_ADMIN_UUID { + return Ok(Json(json!({}))); + } + // TODO: We receive the invite token as ?token=<>, validate it contains the org id - let policies = OrgPolicy::find_by_org(org_id, &mut conn).await; + let policies = OrgPolicy::find_by_org(&org_id, &mut conn).await; let policies_json: Vec = policies.iter().map(OrgPolicy::to_json).collect(); Ok(Json(json!({ @@ -1746,15 +1986,17 @@ async fn list_policies_token(org_id: &str, token: &str, mut conn: DbConn) -> Jso } #[get("/organizations//policies/")] -async fn get_policy(org_id: &str, pol_type: i32, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { - let pol_type_enum = match OrgPolicyType::from_i32(pol_type) { - Some(pt) => pt, - None => err!("Invalid or unsupported policy type"), +async fn get_policy(org_id: OrganizationId, pol_type: i32, headers: AdminHeaders, mut conn: DbConn) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let Some(pol_type_enum) = OrgPolicyType::from_i32(pol_type) else { + err!("Invalid or unsupported policy type") }; - let policy = match OrgPolicy::find_by_org_and_type(org_id, pol_type_enum, &mut conn).await { + let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type_enum, &mut conn).await { Some(p) => p, - None => OrgPolicy::new(String::from(org_id), pol_type_enum, "null".to_string()), + None => OrgPolicy::new(org_id.clone(), pol_type_enum, "null".to_string()), 
}; Ok(Json(policy.to_json())) @@ -1770,17 +2012,19 @@ struct PolicyData { #[put("/organizations//policies/", data = "")] async fn put_policy( - org_id: &str, + org_id: OrganizationId, pol_type: i32, data: Json, headers: AdminHeaders, mut conn: DbConn, ) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } let data: PolicyData = data.into_inner(); - let pol_type_enum = match OrgPolicyType::from_i32(pol_type) { - Some(pt) => pt, - None => err!("Invalid or unsupported policy type"), + let Some(pol_type_enum) = OrgPolicyType::from_i32(pol_type) else { + err!("Invalid or unsupported policy type") }; // Bitwarden only allows the Reset Password policy when Single Org policy is enabled @@ -1791,7 +2035,7 @@ async fn put_policy( if CONFIG.enforce_single_org_with_reset_pw_policy() { if pol_type_enum == OrgPolicyType::ResetPassword && data.enabled { let single_org_policy_enabled = - match OrgPolicy::find_by_org_and_type(org_id, OrgPolicyType::SingleOrg, &mut conn).await { + match OrgPolicy::find_by_org_and_type(&org_id, OrgPolicyType::SingleOrg, &mut conn).await { Some(p) => p.enabled, None => false, }; @@ -1804,7 +2048,7 @@ async fn put_policy( // Also prevent the Single Org Policy to be disabled if the Reset Password policy is enabled if pol_type_enum == OrgPolicyType::SingleOrg && !data.enabled { let reset_pw_policy_enabled = - match OrgPolicy::find_by_org_and_type(org_id, OrgPolicyType::ResetPassword, &mut conn).await { + match OrgPolicy::find_by_org_and_type(&org_id, OrgPolicyType::ResetPassword, &mut conn).await { Some(p) => p.enabled, None => false, }; @@ -1818,7 +2062,7 @@ async fn put_policy( // When enabling the TwoFactorAuthentication policy, revoke all members that do not have 2FA if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled { two_factor::enforce_2fa_policy_for_org( - org_id, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -1829,14 +2073,14 @@ 
async fn put_policy( // When enabling the SingleOrg policy, remove this org's members that are members of other orgs if pol_type_enum == OrgPolicyType::SingleOrg && data.enabled { - for member in UserOrganization::find_by_org(org_id, &mut conn).await.into_iter() { + for member in Membership::find_by_org(&org_id, &mut conn).await.into_iter() { // Policy only applies to non-Owner/non-Admin members who have accepted joining the org // Exclude invited and revoked users when checking for this policy. // Those users will not be allowed to accept or be activated because of the policy checks done there. // We check if the count is larger then 1, because it includes this organization also. - if member.atype < UserOrgType::Admin - && member.status != UserOrgStatus::Invited as i32 - && UserOrganization::count_accepted_and_confirmed_by_user(&member.user_uuid, &mut conn).await > 1 + if member.atype < MembershipType::Admin + && member.status != MembershipStatus::Invited as i32 + && Membership::count_accepted_and_confirmed_by_user(&member.user_uuid, &mut conn).await > 1 { if CONFIG.mail_enabled() { let org = Organization::find_by_uuid(&member.org_uuid, &mut conn).await.unwrap(); @@ -1848,7 +2092,7 @@ async fn put_policy( log_event( EventType::OrganizationUserRemoved as i32, &member.uuid, - org_id, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -1861,9 +2105,9 @@ async fn put_policy( } } - let mut policy = match OrgPolicy::find_by_org_and_type(org_id, pol_type_enum, &mut conn).await { + let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type_enum, &mut conn).await { Some(p) => p, - None => OrgPolicy::new(String::from(org_id), pol_type_enum, "{}".to_string()), + None => OrgPolicy::new(org_id.clone(), pol_type_enum, "{}".to_string()), }; policy.enabled = data.enabled; @@ -1872,8 +2116,8 @@ async fn put_policy( log_event( EventType::PolicyUpdated as i32, - &policy.uuid, - org_id, + policy.uuid.as_ref(), + &org_id, &headers.user.uuid, 
headers.device.atype, &headers.ip.ip, @@ -1886,7 +2130,7 @@ async fn put_policy( #[allow(unused_variables)] #[get("/organizations//tax")] -fn get_organization_tax(org_id: &str, _headers: Headers) -> Json { +fn get_organization_tax(org_id: OrganizationId, _headers: Headers) -> Json { // Prevent a 404 error, which also causes Javascript errors. // Upstream sends "Only allowed when not self hosted." As an error message. // If we do the same it will also output this to the log, which is overkill. @@ -1933,6 +2177,12 @@ fn get_plans_tax_rates(_headers: Headers) -> Json { Json(_empty_data_json()) } +#[get("/organizations/<_org_id>/billing/metadata")] +fn get_billing_metadata(_org_id: OrganizationId, _headers: Headers) -> Json { + // Prevent a 404 error, which also causes Javascript errors. + Json(_empty_data_json()) +} + fn _empty_data_json() -> Value { json!({ "object": "list", @@ -1970,8 +2220,11 @@ struct OrgImportData { users: Vec, } +/// This function seems to be deprected +/// It is only used with older directory connectors +/// TODO: Cleanup Tech debt #[post("/organizations//import", data = "")] -async fn import(org_id: &str, data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn import(org_id: OrganizationId, data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { let data = data.into_inner(); // TODO: Currently we aren't storing the externalId's anywhere, so we also don't have a way @@ -1980,8 +2233,8 @@ async fn import(org_id: &str, data: Json, headers: Headers, mut c // as opposed to upstream which only removes auto-imported users. 
// User needs to be admin or owner to use the Directory Connector - match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await { - Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ } + match Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await { + Some(member) if member.atype >= MembershipType::Admin => { /* Okay, nothing to do */ } Some(_) => err!("User has insufficient permissions to use Directory Connector"), None => err!("User not part of organization"), }; @@ -1989,11 +2242,11 @@ async fn import(org_id: &str, data: Json, headers: Headers, mut c for user_data in &data.users { if user_data.deleted { // If user is marked for deletion and it exists, delete it - if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.email, org_id, &mut conn).await { + if let Some(member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await { log_event( EventType::OrganizationUserRemoved as i32, - &user_org.uuid, - org_id, + &member.uuid, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -2001,64 +2254,66 @@ async fn import(org_id: &str, data: Json, headers: Headers, mut c ) .await; - user_org.delete(&mut conn).await?; + member.delete(&mut conn).await?; } // If user is not part of the organization, but it exists - } else if UserOrganization::find_by_email_and_org(&user_data.email, org_id, &mut conn).await.is_none() { + } else if Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await.is_none() { if let Some(user) = User::find_by_mail(&user_data.email, &mut conn).await { - let user_org_status = if CONFIG.mail_enabled() { - UserOrgStatus::Invited as i32 + let member_status = if CONFIG.mail_enabled() { + MembershipStatus::Invited as i32 } else { - UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites + MembershipStatus::Accepted as i32 // Automatically mark user as 
accepted if no email invites }; - let mut new_org_user = UserOrganization::new(user.uuid.clone(), String::from(org_id)); - new_org_user.access_all = false; - new_org_user.atype = UserOrgType::User as i32; - new_org_user.status = user_org_status; - - new_org_user.save(&mut conn).await?; - - log_event( - EventType::OrganizationUserInvited as i32, - &new_org_user.uuid, - org_id, - &headers.user.uuid, - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; + let mut new_member = Membership::new(user.uuid.clone(), org_id.clone()); + new_member.access_all = false; + new_member.atype = MembershipType::User as i32; + new_member.status = member_status; if CONFIG.mail_enabled() { - let org_name = match Organization::find_by_uuid(org_id, &mut conn).await { + let org_name = match Organization::find_by_uuid(&org_id, &mut conn).await { Some(org) => org.name, None => err!("Error looking up organization"), }; mail::send_invite( &user, - Some(String::from(org_id)), - Some(new_org_user.uuid), + org_id.clone(), + new_member.uuid.clone(), &org_name, Some(headers.user.email.clone()), ) .await?; } + + // Save the member after sending an email + // If sending fails the member will not be saved to the database, and will not result in the admin needing to reinvite the users manually + new_member.save(&mut conn).await?; + + log_event( + EventType::OrganizationUserInvited as i32, + &new_member.uuid, + &org_id, + &headers.user.uuid, + headers.device.atype, + &headers.ip.ip, + &mut conn, + ) + .await; } } } // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true) if data.overwrite_existing { - for user_org in UserOrganization::find_by_org_and_type(org_id, UserOrgType::User, &mut conn).await { - if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.email) { + for member in Membership::find_by_org_and_type(&org_id, MembershipType::User, &mut 
conn).await { + if let Some(user_email) = User::find_by_uuid(&member.user_uuid, &mut conn).await.map(|u| u.email) { if !data.users.iter().any(|u| u.email == user_email) { log_event( EventType::OrganizationUserRemoved as i32, - &user_org.uuid, - org_id, + &member.uuid, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -2066,7 +2321,7 @@ async fn import(org_id: &str, data: Json, headers: Headers, mut c ) .await; - user_org.delete(&mut conn).await?; + member.delete(&mut conn).await?; } } } @@ -2076,57 +2331,60 @@ async fn import(org_id: &str, data: Json, headers: Headers, mut c } // Pre web-vault v2022.9.x endpoint -#[put("/organizations//users//deactivate")] -async fn deactivate_organization_user( - org_id: &str, - org_user_id: &str, +#[put("/organizations//users//deactivate")] +async fn deactivate_member( + org_id: OrganizationId, + member_id: MembershipId, headers: AdminHeaders, mut conn: DbConn, ) -> EmptyResult { - _revoke_organization_user(org_id, org_user_id, &headers, &mut conn).await -} - -// Pre web-vault v2022.9.x endpoint -#[put("/organizations//users/deactivate", data = "")] -async fn bulk_deactivate_organization_user( - org_id: &str, - data: Json, - headers: AdminHeaders, - conn: DbConn, -) -> Json { - bulk_revoke_organization_user(org_id, data, headers, conn).await -} - -#[put("/organizations//users//revoke")] -async fn revoke_organization_user( - org_id: &str, - org_user_id: &str, - headers: AdminHeaders, - mut conn: DbConn, -) -> EmptyResult { - _revoke_organization_user(org_id, org_user_id, &headers, &mut conn).await + _revoke_member(&org_id, &member_id, &headers, &mut conn).await } #[derive(Deserialize, Debug)] #[serde(rename_all = "camelCase")] -struct OrgBulkRevokeData { - ids: Option>, +struct BulkRevokeMembershipIds { + ids: Option>, +} + +// Pre web-vault v2022.9.x endpoint +#[put("/organizations//users/deactivate", data = "")] +async fn bulk_deactivate_members( + org_id: OrganizationId, + data: Json, + headers: 
AdminHeaders, + conn: DbConn, +) -> JsonResult { + bulk_revoke_members(org_id, data, headers, conn).await +} + +#[put("/organizations//users//revoke")] +async fn revoke_member( + org_id: OrganizationId, + member_id: MembershipId, + headers: AdminHeaders, + mut conn: DbConn, +) -> EmptyResult { + _revoke_member(&org_id, &member_id, &headers, &mut conn).await } #[put("/organizations//users/revoke", data = "")] -async fn bulk_revoke_organization_user( - org_id: &str, - data: Json, +async fn bulk_revoke_members( + org_id: OrganizationId, + data: Json, headers: AdminHeaders, mut conn: DbConn, -) -> Json { +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } let data = data.into_inner(); let mut bulk_response = Vec::new(); match data.ids { - Some(org_users) => { - for org_user_id in org_users { - let err_msg = match _revoke_organization_user(org_id, &org_user_id, &headers, &mut conn).await { + Some(members) => { + for member_id in members { + let err_msg = match _revoke_member(&org_id, &member_id, &headers, &mut conn).await { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -2134,7 +2392,7 @@ async fn bulk_revoke_organization_user( bulk_response.push(json!( { "object": "OrganizationUserBulkResponseModel", - "id": org_user_id, + "id": member_id, "error": err_msg } )); @@ -2143,39 +2401,39 @@ async fn bulk_revoke_organization_user( None => error!("No users to revoke"), } - Json(json!({ + Ok(Json(json!({ "data": bulk_response, "object": "list", "continuationToken": null - })) + }))) } -async fn _revoke_organization_user( - org_id: &str, - org_user_id: &str, +async fn _revoke_member( + org_id: &OrganizationId, + member_id: &MembershipId, headers: &AdminHeaders, conn: &mut DbConn, ) -> EmptyResult { - match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { - Some(mut user_org) if user_org.status > UserOrgStatus::Revoked as i32 => { - if user_org.user_uuid == 
headers.user.uuid { + match Membership::find_by_uuid_and_org(member_id, org_id, conn).await { + Some(mut member) if member.status > MembershipStatus::Revoked as i32 => { + if member.user_uuid == headers.user.uuid { err!("You cannot revoke yourself") } - if user_org.atype == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner { + if member.atype == MembershipType::Owner && headers.membership_type != MembershipType::Owner { err!("Only owners can revoke other owners") } - if user_org.atype == UserOrgType::Owner - && UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, conn).await <= 1 + if member.atype == MembershipType::Owner + && Membership::count_confirmed_by_org_and_type(org_id, MembershipType::Owner, conn).await <= 1 { err!("Organization must have at least one confirmed owner") } - user_org.revoke(); - user_org.save(conn).await?; + member.revoke(); + member.save(conn).await?; log_event( EventType::OrganizationUserRevoked as i32, - &user_org.uuid, + &member.uuid, org_id, &headers.user.uuid, headers.device.atype, @@ -2191,49 +2449,52 @@ async fn _revoke_organization_user( } // Pre web-vault v2022.9.x endpoint -#[put("/organizations//users//activate")] -async fn activate_organization_user( - org_id: &str, - org_user_id: &str, +#[put("/organizations//users//activate")] +async fn activate_member( + org_id: OrganizationId, + member_id: MembershipId, headers: AdminHeaders, mut conn: DbConn, ) -> EmptyResult { - _restore_organization_user(org_id, org_user_id, &headers, &mut conn).await + _restore_member(&org_id, &member_id, &headers, &mut conn).await } // Pre web-vault v2022.9.x endpoint #[put("/organizations//users/activate", data = "")] -async fn bulk_activate_organization_user( - org_id: &str, - data: Json, +async fn bulk_activate_members( + org_id: OrganizationId, + data: Json, headers: AdminHeaders, conn: DbConn, -) -> Json { - bulk_restore_organization_user(org_id, data, headers, conn).await +) -> JsonResult { + 
bulk_restore_members(org_id, data, headers, conn).await } -#[put("/organizations//users//restore")] -async fn restore_organization_user( - org_id: &str, - org_user_id: &str, +#[put("/organizations//users//restore")] +async fn restore_member( + org_id: OrganizationId, + member_id: MembershipId, headers: AdminHeaders, mut conn: DbConn, ) -> EmptyResult { - _restore_organization_user(org_id, org_user_id, &headers, &mut conn).await + _restore_member(&org_id, &member_id, &headers, &mut conn).await } #[put("/organizations//users/restore", data = "")] -async fn bulk_restore_organization_user( - org_id: &str, - data: Json, +async fn bulk_restore_members( + org_id: OrganizationId, + data: Json, headers: AdminHeaders, mut conn: DbConn, -) -> Json { +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } let data = data.into_inner(); let mut bulk_response = Vec::new(); - for org_user_id in data.ids { - let err_msg = match _restore_organization_user(org_id, &org_user_id, &headers, &mut conn).await { + for member_id in data.ids { + let err_msg = match _restore_member(&org_id, &member_id, &headers, &mut conn).await { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -2241,42 +2502,42 @@ async fn bulk_restore_organization_user( bulk_response.push(json!( { "object": "OrganizationUserBulkResponseModel", - "id": org_user_id, + "id": member_id, "error": err_msg } )); } - Json(json!({ + Ok(Json(json!({ "data": bulk_response, "object": "list", "continuationToken": null - })) + }))) } -async fn _restore_organization_user( - org_id: &str, - org_user_id: &str, +async fn _restore_member( + org_id: &OrganizationId, + member_id: &MembershipId, headers: &AdminHeaders, conn: &mut DbConn, ) -> EmptyResult { - match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { - Some(mut user_org) if user_org.status < UserOrgStatus::Accepted as i32 => { - if user_org.user_uuid == headers.user.uuid { + match 
Membership::find_by_uuid_and_org(member_id, org_id, conn).await { + Some(mut member) if member.status < MembershipStatus::Accepted as i32 => { + if member.user_uuid == headers.user.uuid { err!("You cannot restore yourself") } - if user_org.atype == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner { + if member.atype == MembershipType::Owner && headers.membership_type != MembershipType::Owner { err!("Only owners can restore other owners") } - // This check is also done at accept_invite(), _confirm_invite, _activate_user(), edit_user(), admin::update_user_org_type + // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type // It returns different error messages per function. - if user_org.atype < UserOrgType::Admin { - match OrgPolicy::is_user_allowed(&user_org.user_uuid, org_id, false, conn).await { + if member.atype < MembershipType::Admin { + match OrgPolicy::is_user_allowed(&member.user_uuid, org_id, false, conn).await { Ok(_) => {} Err(OrgPolicyErr::TwoFactorMissing) => { if CONFIG.email_2fa_auto_fallback() { - two_factor::email::find_and_activate_email_2fa(&user_org.user_uuid, conn).await?; + two_factor::email::find_and_activate_email_2fa(&member.user_uuid, conn).await?; } else { err!("You cannot restore this user because they have not setup 2FA"); } @@ -2287,12 +2548,12 @@ async fn _restore_organization_user( } } - user_org.restore(); - user_org.save(conn).await?; + member.restore(); + member.save(conn).await?; log_event( EventType::OrganizationUserRestored as i32, - &user_org.uuid, + &member.uuid, org_id, &headers.user.uuid, headers.device.atype, @@ -2308,10 +2569,13 @@ async fn _restore_organization_user( } #[get("/organizations//groups")] -async fn get_groups(org_id: &str, _headers: ManagerHeadersLoose, mut conn: DbConn) -> JsonResult { +async fn get_groups(org_id: OrganizationId, headers: ManagerHeadersLoose, mut conn: DbConn) -> JsonResult { + if org_id != 
headers.membership.org_uuid { + err!("Organization not found", "Organization id's do not match"); + } let groups: Vec = if CONFIG.org_groups_enabled() { // Group::find_by_organization(&org_id, &mut conn).await.iter().map(Group::to_json).collect::() - let groups = Group::find_by_organization(org_id, &mut conn).await; + let groups = Group::find_by_organization(&org_id, &mut conn).await; let mut groups_json = Vec::with_capacity(groups.len()); for g in groups { @@ -2331,6 +2595,11 @@ async fn get_groups(org_id: &str, _headers: ManagerHeadersLoose, mut conn: DbCon }))) } +#[get("/organizations//groups/details", rank = 1)] +async fn get_groups_details(org_id: OrganizationId, headers: ManagerHeadersLoose, conn: DbConn) -> JsonResult { + get_groups(org_id, headers, conn).await +} + #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct GroupRequest { @@ -2338,13 +2607,13 @@ struct GroupRequest { #[serde(default)] access_all: bool, external_id: Option, - collections: Vec, - users: Vec, + collections: Vec, + users: Vec, } impl GroupRequest { - pub fn to_group(&self, organizations_uuid: &str) -> Group { - Group::new(String::from(organizations_uuid), self.name.clone(), self.access_all, self.external_id.clone()) + pub fn to_group(&self, org_uuid: &OrganizationId) -> Group { + Group::new(org_uuid.clone(), self.name.clone(), self.access_all, self.external_id.clone()) } pub fn update_group(&self, mut group: Group) -> Group { @@ -2359,42 +2628,23 @@ impl GroupRequest { #[derive(Deserialize, Serialize)] #[serde(rename_all = "camelCase")] -struct SelectionReadOnly { - id: String, +struct CollectionData { + id: CollectionId, read_only: bool, hide_passwords: bool, + manage: bool, } -impl SelectionReadOnly { - pub fn to_collection_group(&self, groups_uuid: String) -> CollectionGroup { - CollectionGroup::new(self.id.clone(), groups_uuid, self.read_only, self.hide_passwords) - } - - pub fn to_collection_group_details_read_only(collection_group: &CollectionGroup) -> 
SelectionReadOnly { - SelectionReadOnly { - id: collection_group.groups_uuid.clone(), - read_only: collection_group.read_only, - hide_passwords: collection_group.hide_passwords, - } - } - - pub fn to_collection_user_details_read_only(collection_user: &CollectionUser) -> SelectionReadOnly { - SelectionReadOnly { - id: collection_user.user_uuid.clone(), - read_only: collection_user.read_only, - hide_passwords: collection_user.hide_passwords, - } - } - - pub fn to_json(&self) -> Value { - json!(self) +impl CollectionData { + pub fn to_collection_group(&self, groups_uuid: GroupId) -> CollectionGroup { + CollectionGroup::new(self.id.clone(), groups_uuid, self.read_only, self.hide_passwords, self.manage) } } #[post("/organizations//groups/", data = "")] async fn post_group( - org_id: &str, - group_id: &str, + org_id: OrganizationId, + group_id: GroupId, data: Json, headers: AdminHeaders, conn: DbConn, @@ -2403,18 +2653,23 @@ async fn post_group( } #[post("/organizations//groups", data = "")] -async fn post_groups(org_id: &str, headers: AdminHeaders, data: Json, mut conn: DbConn) -> JsonResult { +async fn post_groups( + org_id: OrganizationId, + headers: AdminHeaders, + data: Json, + mut conn: DbConn, +) -> JsonResult { if !CONFIG.org_groups_enabled() { err!("Group support is disabled"); } let group_request = data.into_inner(); - let group = group_request.to_group(org_id); + let group = group_request.to_group(&org_id); log_event( EventType::GroupCreated as i32, &group.uuid, - org_id, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -2427,8 +2682,8 @@ async fn post_groups(org_id: &str, headers: AdminHeaders, data: Json/groups/", data = "")] async fn put_group( - org_id: &str, - group_id: &str, + org_id: OrganizationId, + group_id: GroupId, data: Json, headers: AdminHeaders, mut conn: DbConn, @@ -2437,21 +2692,20 @@ async fn put_group( err!("Group support is disabled"); } - let group = match Group::find_by_uuid(group_id, &mut conn).await { - 
Some(group) => group, - None => err!("Group not found"), + let Some(group) = Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await else { + err!("Group not found", "Group uuid is invalid or does not belong to the organization") }; let group_request = data.into_inner(); let updated_group = group_request.update_group(group); - CollectionGroup::delete_all_by_group(group_id, &mut conn).await?; - GroupUser::delete_all_by_group(group_id, &mut conn).await?; + CollectionGroup::delete_all_by_group(&group_id, &mut conn).await?; + GroupUser::delete_all_by_group(&group_id, &mut conn).await?; log_event( EventType::GroupUpdated as i32, &updated_group.uuid, - org_id, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -2464,27 +2718,27 @@ async fn put_group( async fn add_update_group( mut group: Group, - collections: Vec, - users: Vec, - org_id: &str, + collections: Vec, + members: Vec, + org_id: OrganizationId, headers: &AdminHeaders, conn: &mut DbConn, ) -> JsonResult { group.save(conn).await?; - for selection_read_only_request in collections { - let mut collection_group = selection_read_only_request.to_collection_group(group.uuid.clone()); + for col_selection in collections { + let mut collection_group = col_selection.to_collection_group(group.uuid.clone()); collection_group.save(conn).await?; } - for assigned_user_id in users { - let mut user_entry = GroupUser::new(group.uuid.clone(), assigned_user_id.clone()); + for assigned_member in members { + let mut user_entry = GroupUser::new(group.uuid.clone(), assigned_member.clone()); user_entry.save(conn).await?; log_event( EventType::OrganizationUserUpdatedGroups as i32, - &assigned_user_id, - org_id, + &assigned_member, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -2502,38 +2756,59 @@ async fn add_update_group( }))) } -#[get("/organizations/<_org_id>/groups//details")] -async fn get_group_details(_org_id: &str, group_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> 
JsonResult { +#[get("/organizations//groups//details")] +async fn get_group_details( + org_id: OrganizationId, + group_id: GroupId, + headers: AdminHeaders, + mut conn: DbConn, +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } if !CONFIG.org_groups_enabled() { err!("Group support is disabled"); } - let group = match Group::find_by_uuid(group_id, &mut conn).await { - Some(group) => group, - _ => err!("Group could not be found!"), + let Some(group) = Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await else { + err!("Group not found", "Group uuid is invalid or does not belong to the organization") }; Ok(Json(group.to_json_details(&mut conn).await)) } #[post("/organizations//groups//delete")] -async fn post_delete_group(org_id: &str, group_id: &str, headers: AdminHeaders, mut conn: DbConn) -> EmptyResult { - _delete_group(org_id, group_id, &headers, &mut conn).await +async fn post_delete_group( + org_id: OrganizationId, + group_id: GroupId, + headers: AdminHeaders, + mut conn: DbConn, +) -> EmptyResult { + _delete_group(&org_id, &group_id, &headers, &mut conn).await } #[delete("/organizations//groups/")] -async fn delete_group(org_id: &str, group_id: &str, headers: AdminHeaders, mut conn: DbConn) -> EmptyResult { - _delete_group(org_id, group_id, &headers, &mut conn).await +async fn delete_group( + org_id: OrganizationId, + group_id: GroupId, + headers: AdminHeaders, + mut conn: DbConn, +) -> EmptyResult { + _delete_group(&org_id, &group_id, &headers, &mut conn).await } -async fn _delete_group(org_id: &str, group_id: &str, headers: &AdminHeaders, conn: &mut DbConn) -> EmptyResult { +async fn _delete_group( + org_id: &OrganizationId, + group_id: &GroupId, + headers: &AdminHeaders, + conn: &mut DbConn, +) -> EmptyResult { if !CONFIG.org_groups_enabled() { err!("Group support is disabled"); } - let group = match Group::find_by_uuid(group_id, conn).await { - Some(group) => group, - _ => 
err!("Group not found"), + let Some(group) = Group::find_by_uuid_and_org(group_id, org_id, conn).await else { + err!("Group not found", "Group uuid is invalid or does not belong to the organization") }; log_event( @@ -2552,8 +2827,8 @@ async fn _delete_group(org_id: &str, group_id: &str, headers: &AdminHeaders, con #[delete("/organizations//groups", data = "")] async fn bulk_delete_groups( - org_id: &str, - data: Json, + org_id: OrganizationId, + data: Json, headers: AdminHeaders, mut conn: DbConn, ) -> EmptyResult { @@ -2561,76 +2836,84 @@ async fn bulk_delete_groups( err!("Group support is disabled"); } - let data: OrgBulkIds = data.into_inner(); + let data: BulkGroupIds = data.into_inner(); for group_id in data.ids { - _delete_group(org_id, &group_id, &headers, &mut conn).await? + _delete_group(&org_id, &group_id, &headers, &mut conn).await? } Ok(()) } -#[get("/organizations/<_org_id>/groups/")] -async fn get_group(_org_id: &str, group_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { +#[get("/organizations//groups/", rank = 2)] +async fn get_group(org_id: OrganizationId, group_id: GroupId, headers: AdminHeaders, mut conn: DbConn) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } if !CONFIG.org_groups_enabled() { err!("Group support is disabled"); } - let group = match Group::find_by_uuid(group_id, &mut conn).await { - Some(group) => group, - _ => err!("Group not found"), + let Some(group) = Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await else { + err!("Group not found", "Group uuid is invalid or does not belong to the organization") }; Ok(Json(group.to_json())) } -#[get("/organizations/<_org_id>/groups//users")] -async fn get_group_users(_org_id: &str, group_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { +#[get("/organizations//groups//users")] +async fn get_group_members( + org_id: OrganizationId, + group_id: GroupId, + headers: 
AdminHeaders, + mut conn: DbConn, +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } if !CONFIG.org_groups_enabled() { err!("Group support is disabled"); } - match Group::find_by_uuid(group_id, &mut conn).await { - Some(_) => { /* Do nothing */ } - _ => err!("Group could not be found!"), + if Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await.is_none() { + err!("Group could not be found!", "Group uuid is invalid or does not belong to the organization") }; - let group_users: Vec = GroupUser::find_by_group(group_id, &mut conn) + let group_members: Vec = GroupUser::find_by_group(&group_id, &mut conn) .await .iter() .map(|entry| entry.users_organizations_uuid.clone()) .collect(); - Ok(Json(json!(group_users))) + Ok(Json(json!(group_members))) } #[put("/organizations//groups//users", data = "")] -async fn put_group_users( - org_id: &str, - group_id: &str, +async fn put_group_members( + org_id: OrganizationId, + group_id: GroupId, headers: AdminHeaders, - data: Json>, + data: Json>, mut conn: DbConn, ) -> EmptyResult { if !CONFIG.org_groups_enabled() { err!("Group support is disabled"); } - match Group::find_by_uuid(group_id, &mut conn).await { - Some(_) => { /* Do nothing */ } - _ => err!("Group could not be found!"), + if Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await.is_none() { + err!("Group could not be found!", "Group uuid is invalid or does not belong to the organization") }; - GroupUser::delete_all_by_group(group_id, &mut conn).await?; + GroupUser::delete_all_by_group(&group_id, &mut conn).await?; - let assigned_user_ids = data.into_inner(); - for assigned_user_id in assigned_user_ids { - let mut user_entry = GroupUser::new(String::from(group_id), assigned_user_id.clone()); + let assigned_members = data.into_inner(); + for assigned_member in assigned_members { + let mut user_entry = GroupUser::new(group_id.clone(), assigned_member.clone()); user_entry.save(&mut 
conn).await?; log_event( EventType::OrganizationUserUpdatedGroups as i32, - &assigned_user_id, - org_id, + &assigned_member, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -2642,19 +2925,26 @@ async fn put_group_users( Ok(()) } -#[get("/organizations/<_org_id>/users//groups")] -async fn get_user_groups(_org_id: &str, user_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { +#[get("/organizations//users//groups")] +async fn get_user_groups( + org_id: OrganizationId, + member_id: MembershipId, + headers: AdminHeaders, + mut conn: DbConn, +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } if !CONFIG.org_groups_enabled() { err!("Group support is disabled"); } - match UserOrganization::find_by_uuid(user_id, &mut conn).await { - Some(_) => { /* Do nothing */ } - _ => err!("User could not be found!"), + if Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await.is_none() { + err!("User could not be found!") }; - let user_groups: Vec = - GroupUser::find_by_user(user_id, &mut conn).await.iter().map(|entry| entry.groups_uuid.clone()).collect(); + let user_groups: Vec = + GroupUser::find_by_member(&member_id, &mut conn).await.iter().map(|entry| entry.groups_uuid.clone()).collect(); Ok(Json(json!(user_groups))) } @@ -2662,53 +2952,51 @@ async fn get_user_groups(_org_id: &str, user_id: &str, _headers: AdminHeaders, m #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct OrganizationUserUpdateGroupsRequest { - group_ids: Vec, + group_ids: Vec, } -#[post("/organizations//users//groups", data = "")] +#[post("/organizations//users//groups", data = "")] async fn post_user_groups( - org_id: &str, - org_user_id: &str, + org_id: OrganizationId, + member_id: MembershipId, data: Json, headers: AdminHeaders, conn: DbConn, ) -> EmptyResult { - put_user_groups(org_id, org_user_id, data, headers, conn).await + put_user_groups(org_id, member_id, data, 
headers, conn).await } -#[put("/organizations//users//groups", data = "")] +#[put("/organizations//users//groups", data = "")] async fn put_user_groups( - org_id: &str, - org_user_id: &str, + org_id: OrganizationId, + member_id: MembershipId, data: Json, headers: AdminHeaders, mut conn: DbConn, ) -> EmptyResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } if !CONFIG.org_groups_enabled() { err!("Group support is disabled"); } - let user_org = match UserOrganization::find_by_uuid(org_user_id, &mut conn).await { - Some(uo) => uo, - _ => err!("User could not be found!"), - }; - - if user_org.org_uuid != org_id { - err!("Group doesn't belong to organization"); + if Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await.is_none() { + err!("User could not be found or does not belong to the organization."); } - GroupUser::delete_all_by_user(org_user_id, &mut conn).await?; + GroupUser::delete_all_by_member(&member_id, &mut conn).await?; let assigned_group_ids = data.into_inner(); for assigned_group_id in assigned_group_ids.group_ids { - let mut group_user = GroupUser::new(assigned_group_id.clone(), String::from(org_user_id)); + let mut group_user = GroupUser::new(assigned_group_id.clone(), member_id.clone()); group_user.save(&mut conn).await?; } log_event( EventType::OrganizationUserUpdatedGroups as i32, - org_user_id, - org_id, + &member_id, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -2719,51 +3007,44 @@ async fn put_user_groups( Ok(()) } -#[post("/organizations//groups//delete-user/")] -async fn post_delete_group_user( - org_id: &str, - group_id: &str, - org_user_id: &str, +#[post("/organizations//groups//delete-user/")] +async fn post_delete_group_member( + org_id: OrganizationId, + group_id: GroupId, + member_id: MembershipId, headers: AdminHeaders, conn: DbConn, ) -> EmptyResult { - delete_group_user(org_id, group_id, org_user_id, headers, conn).await + 
delete_group_member(org_id, group_id, member_id, headers, conn).await } -#[delete("/organizations//groups//users/")] -async fn delete_group_user( - org_id: &str, - group_id: &str, - org_user_id: &str, +#[delete("/organizations//groups//users/")] +async fn delete_group_member( + org_id: OrganizationId, + group_id: GroupId, + member_id: MembershipId, headers: AdminHeaders, mut conn: DbConn, ) -> EmptyResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } if !CONFIG.org_groups_enabled() { err!("Group support is disabled"); } - let user_org = match UserOrganization::find_by_uuid(org_user_id, &mut conn).await { - Some(uo) => uo, - _ => err!("User could not be found!"), - }; - - if user_org.org_uuid != org_id { - err!("User doesn't belong to organization"); + if Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await.is_none() { + err!("User could not be found or does not belong to the organization."); } - let group = match Group::find_by_uuid(group_id, &mut conn).await { - Some(g) => g, - _ => err!("Group could not be found!"), - }; - - if group.organizations_uuid != org_id { - err!("Group doesn't belong to organization"); + if Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await.is_none() { + err!("Group could not be found or does not belong to the organization."); } log_event( EventType::OrganizationUserUpdatedGroups as i32, - org_user_id, - org_id, + &member_id, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -2771,7 +3052,7 @@ async fn delete_group_user( ) .await; - GroupUser::delete_by_group_id_and_user_id(group_id, org_user_id, &mut conn).await + GroupUser::delete_by_group_and_member(&group_id, &member_id, &mut conn).await } #[derive(Deserialize)] @@ -2789,14 +3070,20 @@ struct OrganizationUserResetPasswordRequest { key: String, } -// Upstrem reports this is the renamed endpoint instead of `/keys` +// Upstream reports this is the renamed endpoint instead of 
`/keys` // But the clients do not seem to use this at all // Just add it here in case they will #[get("/organizations//public-key")] -async fn get_organization_public_key(org_id: &str, _headers: Headers, mut conn: DbConn) -> JsonResult { - let org = match Organization::find_by_uuid(org_id, &mut conn).await { - Some(organization) => organization, - None => err!("Organization not found"), +async fn get_organization_public_key( + org_id: OrganizationId, + headers: OrgMemberHeaders, + mut conn: DbConn, +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let Some(org) = Organization::find_by_uuid(&org_id, &mut conn).await else { + err!("Organization not found") }; Ok(Json(json!({ @@ -2808,40 +3095,40 @@ async fn get_organization_public_key(org_id: &str, _headers: Headers, mut conn: // Obsolete - Renamed to public-key (2023.8), left for backwards compatibility with older clients // https://github.com/bitwarden/server/blob/25dc0c9178e3e3584074bbef0d4be827b7c89415/src/Api/AdminConsole/Controllers/OrganizationsController.cs#L463-L468 #[get("/organizations//keys")] -async fn get_organization_keys(org_id: &str, headers: Headers, conn: DbConn) -> JsonResult { +async fn get_organization_keys(org_id: OrganizationId, headers: OrgMemberHeaders, conn: DbConn) -> JsonResult { get_organization_public_key(org_id, headers, conn).await } -#[put("/organizations//users//reset-password", data = "")] +#[put("/organizations//users//reset-password", data = "")] async fn put_reset_password( - org_id: &str, - org_user_id: &str, + org_id: OrganizationId, + member_id: MembershipId, headers: AdminHeaders, data: Json, mut conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - let org = match Organization::find_by_uuid(org_id, &mut conn).await { - Some(org) => org, - None => err!("Required organization not found"), + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let 
Some(org) = Organization::find_by_uuid(&org_id, &mut conn).await else { + err!("Required organization not found") }; - let org_user = match UserOrganization::find_by_uuid_and_org(org_user_id, &org.uuid, &mut conn).await { - Some(user) => user, - None => err!("User to reset isn't member of required organization"), + let Some(member) = Membership::find_by_uuid_and_org(&member_id, &org.uuid, &mut conn).await else { + err!("User to reset isn't member of required organization") }; - let user = match User::find_by_uuid(&org_user.user_uuid, &mut conn).await { - Some(user) => user, - None => err!("User not found"), + let Some(user) = User::find_by_uuid(&member.user_uuid, &mut conn).await else { + err!("User not found") }; - check_reset_password_applicable_and_permissions(org_id, org_user_id, &headers, &mut conn).await?; + check_reset_password_applicable_and_permissions(&org_id, &member_id, &headers, &mut conn).await?; - if org_user.reset_password_key.is_none() { + if member.reset_password_key.is_none() { err!("Password reset not or not correctly enrolled"); } - if org_user.status != (UserOrgStatus::Confirmed as i32) { + if member.status != (MembershipStatus::Confirmed as i32) { err!("Organization user must be confirmed for password reset functionality"); } @@ -2861,8 +3148,8 @@ async fn put_reset_password( log_event( EventType::OrganizationUserAdminResetPassword as i32, - org_user_id, - org_id, + &member_id, + &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, @@ -2873,29 +3160,29 @@ async fn put_reset_password( Ok(()) } -#[get("/organizations//users//reset-password-details")] +#[get("/organizations//users//reset-password-details")] async fn get_reset_password_details( - org_id: &str, - org_user_id: &str, + org_id: OrganizationId, + member_id: MembershipId, headers: AdminHeaders, mut conn: DbConn, ) -> JsonResult { - let org = match Organization::find_by_uuid(org_id, &mut conn).await { - Some(org) => org, - None => err!("Required organization not found"), + 
if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } + let Some(org) = Organization::find_by_uuid(&org_id, &mut conn).await else { + err!("Required organization not found") }; - let org_user = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await { - Some(user) => user, - None => err!("User to reset isn't member of required organization"), + let Some(member) = Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await else { + err!("User to reset isn't member of required organization") }; - let user = match User::find_by_uuid(&org_user.user_uuid, &mut conn).await { - Some(user) => user, - None => err!("User not found"), + let Some(user) = User::find_by_uuid(&member.user_uuid, &mut conn).await else { + err!("User not found") }; - check_reset_password_applicable_and_permissions(org_id, org_user_id, &headers, &mut conn).await?; + check_reset_password_applicable_and_permissions(&org_id, &member_id, &headers, &mut conn).await?; // https://github.com/bitwarden/server/blob/3b50ccb9f804efaacdc46bed5b60e5b28eddefcf/src/Api/Models/Response/Organizations/OrganizationUserResponseModel.cs#L111 Ok(Json(json!({ @@ -2904,41 +3191,39 @@ async fn get_reset_password_details( "kdfIterations":user.client_kdf_iter, "kdfMemory":user.client_kdf_memory, "kdfParallelism":user.client_kdf_parallelism, - "resetPasswordKey":org_user.reset_password_key, + "resetPasswordKey":member.reset_password_key, "encryptedPrivateKey":org.private_key, }))) } async fn check_reset_password_applicable_and_permissions( - org_id: &str, - org_user_id: &str, + org_id: &OrganizationId, + member_id: &MembershipId, headers: &AdminHeaders, conn: &mut DbConn, ) -> EmptyResult { check_reset_password_applicable(org_id, conn).await?; - let target_user = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { - Some(user) => user, - None => err!("Reset target user not found"), + let Some(target_user) = 
Membership::find_by_uuid_and_org(member_id, org_id, conn).await else { + err!("Reset target user not found") }; // Resetting user must be higher/equal to user to reset - match headers.org_user_type { - UserOrgType::Owner => Ok(()), - UserOrgType::Admin if target_user.atype <= UserOrgType::Admin => Ok(()), + match headers.membership_type { + MembershipType::Owner => Ok(()), + MembershipType::Admin if target_user.atype <= MembershipType::Admin => Ok(()), _ => err!("No permission to reset this user's password"), } } -async fn check_reset_password_applicable(org_id: &str, conn: &mut DbConn) -> EmptyResult { +async fn check_reset_password_applicable(org_id: &OrganizationId, conn: &mut DbConn) -> EmptyResult { if !CONFIG.mail_enabled() { err!("Password reset is not supported on an email-disabled instance."); } - let policy = match OrgPolicy::find_by_org_and_type(org_id, OrgPolicyType::ResetPassword, conn).await { - Some(p) => p, - None => err!("Policy not found"), + let Some(policy) = OrgPolicy::find_by_org_and_type(org_id, OrgPolicyType::ResetPassword, conn).await else { + err!("Policy not found") }; if !policy.enabled { @@ -2948,27 +3233,26 @@ async fn check_reset_password_applicable(org_id: &str, conn: &mut DbConn) -> Emp Ok(()) } -#[put("/organizations//users//reset-password-enrollment", data = "")] +#[put("/organizations//users//reset-password-enrollment", data = "")] async fn put_reset_password_enrollment( - org_id: &str, - org_user_id: &str, + org_id: OrganizationId, + member_id: MembershipId, headers: Headers, data: Json, mut conn: DbConn, ) -> EmptyResult { - let mut org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await { - Some(u) => u, - None => err!("User to enroll isn't member of required organization"), + let Some(mut member) = Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await else { + err!("User to enroll isn't member of required organization") }; - 
check_reset_password_applicable(org_id, &mut conn).await?; + check_reset_password_applicable(&org_id, &mut conn).await?; let reset_request = data.into_inner(); if reset_request.reset_password_key.is_none() - && OrgPolicy::org_is_reset_password_auto_enroll(org_id, &mut conn).await + && OrgPolicy::org_is_reset_password_auto_enroll(&org_id, &mut conn).await { - err!("Reset password can't be withdrawed due to an enterprise policy"); + err!("Reset password can't be withdrawn due to an enterprise policy"); } if reset_request.reset_password_key.is_some() { @@ -2980,16 +3264,16 @@ async fn put_reset_password_enrollment( .await?; } - org_user.reset_password_key = reset_request.reset_password_key; - org_user.save(&mut conn).await?; + member.reset_password_key = reset_request.reset_password_key; + member.save(&mut conn).await?; - let log_id = if org_user.reset_password_key.is_some() { + let log_id = if member.reset_password_key.is_some() { EventType::OrganizationUserResetPasswordEnroll as i32 } else { EventType::OrganizationUserResetPasswordWithdraw as i32 }; - log_event(log_id, org_user_id, org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; + log_event(log_id, &member_id, &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; Ok(()) } @@ -3003,11 +3287,14 @@ async fn put_reset_password_enrollment( // Else the export will be just an empty JSON file. #[get("/organizations//export")] async fn get_org_export( - org_id: &str, + org_id: OrganizationId, headers: AdminHeaders, client_version: Option, mut conn: DbConn, -) -> Json { +) -> JsonResult { + if org_id != headers.org_id { + err!("Organization not found", "Organization id's do not match"); + } // Since version v2023.1.0 the format of the export is different. // Also, this endpoint was created since v2022.9.0. // Therefore, we will check for any version smaller then v2023.1.0 and return a different response. 
@@ -3023,29 +3310,29 @@ async fn get_org_export( // Also both main keys here need to be lowercase, else the export will fail. if use_list_response_model { // Backwards compatible pre v2023.1.0 response - Json(json!({ + Ok(Json(json!({ "collections": { - "data": convert_json_key_lcase_first(_get_org_collections(org_id, &mut conn).await), + "data": convert_json_key_lcase_first(_get_org_collections(&org_id, &mut conn).await), "object": "list", "continuationToken": null, }, "ciphers": { - "data": convert_json_key_lcase_first(_get_org_details(org_id, &headers.host, &headers.user.uuid, &mut conn).await), + "data": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await), "object": "list", "continuationToken": null, } - })) + }))) } else { // v2023.1.0 and newer response - Json(json!({ - "collections": convert_json_key_lcase_first(_get_org_collections(org_id, &mut conn).await), - "ciphers": convert_json_key_lcase_first(_get_org_details(org_id, &headers.host, &headers.user.uuid, &mut conn).await), - })) + Ok(Json(json!({ + "collections": convert_json_key_lcase_first(_get_org_collections(&org_id, &mut conn).await), + "ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await), + }))) } } async fn _api_key( - org_id: &str, + org_id: &OrganizationId, data: Json, rotate: bool, headers: AdminHeaders, @@ -3068,7 +3355,7 @@ async fn _api_key( } None => { let api_key = crate::crypto::generate_api_key(); - let new_org_api_key = OrganizationApiKey::new(String::from(org_id), api_key); + let new_org_api_key = OrganizationApiKey::new(org_id.clone(), api_key); new_org_api_key.save(&conn).await.expect("Error creating organization API Key"); new_org_api_key } @@ -3082,16 +3369,21 @@ async fn _api_key( } #[post("/organizations//api-key", data = "")] -async fn api_key(org_id: &str, data: Json, headers: AdminHeaders, conn: DbConn) -> JsonResult { - _api_key(org_id, data, false, 
headers, conn).await -} - -#[post("/organizations//rotate-api-key", data = "")] -async fn rotate_api_key( - org_id: &str, +async fn api_key( + org_id: OrganizationId, data: Json, headers: AdminHeaders, conn: DbConn, ) -> JsonResult { - _api_key(org_id, data, true, headers, conn).await + _api_key(&org_id, data, false, headers, conn).await +} + +#[post("/organizations//rotate-api-key", data = "")] +async fn rotate_api_key( + org_id: OrganizationId, + data: Json, + headers: AdminHeaders, + conn: DbConn, +) -> JsonResult { + _api_key(&org_id, data, true, headers, conn).await } diff --git a/src/api/core/public.rs b/src/api/core/public.rs index 737d30dd..1c85ae1b 100644 --- a/src/api/core/public.rs +++ b/src/api/core/public.rs @@ -52,40 +52,36 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db let data = data.into_inner(); for user_data in &data.members { + let mut user_created: bool = false; if user_data.deleted { // If user is marked for deletion and it exists, revoke it - if let Some(mut user_org) = - UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await - { + if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await { // Only revoke a user if it is not the last confirmed owner - let revoked = if user_org.atype == UserOrgType::Owner - && user_org.status == UserOrgStatus::Confirmed as i32 + let revoked = if member.atype == MembershipType::Owner + && member.status == MembershipStatus::Confirmed as i32 { - if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn).await - <= 1 + if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await <= 1 { warn!("Can't revoke the last owner"); false } else { - user_org.revoke() + member.revoke() } } else { - user_org.revoke() + member.revoke() }; - let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone())); + let ext_modified = 
member.set_external_id(Some(user_data.external_id.clone())); if revoked || ext_modified { - user_org.save(&mut conn).await?; + member.save(&mut conn).await?; } } // If user is part of the organization, restore it - } else if let Some(mut user_org) = - UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await - { - let restored = user_org.restore(); - let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone())); + } else if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await { + let restored = member.restore(); + let ext_modified = member.set_external_id(Some(user_data.external_id.clone())); if restored || ext_modified { - user_org.save(&mut conn).await?; + member.save(&mut conn).await?; } } else { // If user is not part of the organization @@ -97,25 +93,25 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db new_user.save(&mut conn).await?; if !CONFIG.mail_enabled() { - let invitation = Invitation::new(&new_user.email); - invitation.save(&mut conn).await?; + Invitation::new(&new_user.email).save(&mut conn).await?; } + user_created = true; new_user } }; - let user_org_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() { - UserOrgStatus::Invited as i32 + let member_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() { + MembershipStatus::Invited as i32 } else { - UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites + MembershipStatus::Accepted as i32 // Automatically mark user as accepted if no email invites }; - let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone()); - new_org_user.set_external_id(Some(user_data.external_id.clone())); - new_org_user.access_all = false; - new_org_user.atype = UserOrgType::User as i32; - new_org_user.status = user_org_status; + let mut new_member = Membership::new(user.uuid.clone(), org_id.clone()); + 
new_member.set_external_id(Some(user_data.external_id.clone())); + new_member.access_all = false; + new_member.atype = MembershipType::User as i32; + new_member.status = member_status; - new_org_user.save(&mut conn).await?; + new_member.save(&mut conn).await?; if CONFIG.mail_enabled() { let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await { @@ -123,8 +119,18 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db None => err!("Error looking up organization"), }; - mail::send_invite(&user, Some(org_id.clone()), Some(new_org_user.uuid), &org_name, Some(org_email)) - .await?; + if let Err(e) = + mail::send_invite(&user, org_id.clone(), new_member.uuid.clone(), &org_name, Some(org_email)).await + { + // Upon error delete the user, invite and org member records when needed + if user_created { + user.delete(&mut conn).await?; + } else { + new_member.delete(&mut conn).await?; + } + + err!(format!("Error sending invite: {e:?} ")); + } } } } @@ -149,9 +155,8 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?; for ext_id in &group_data.member_external_ids { - if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await - { - let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone()); + if let Some(member) = Membership::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await { + let mut group_user = GroupUser::new(group_uuid.clone(), member.uuid.clone()); group_user.save(&mut conn).await?; } } @@ -164,20 +169,19 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db if data.overwrite_existing { // Generate a HashSet to quickly verify if a member is listed or not. 
let sync_members: HashSet = data.members.into_iter().map(|m| m.external_id).collect(); - for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await { - if let Some(ref user_external_id) = user_org.external_id { + for member in Membership::find_by_org(&org_id, &mut conn).await { + if let Some(ref user_external_id) = member.external_id { if !sync_members.contains(user_external_id) { - if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 { + if member.atype == MembershipType::Owner && member.status == MembershipStatus::Confirmed as i32 { // Removing owner, check that there is at least one other confirmed owner - if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn) - .await + if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await <= 1 { warn!("Can't delete the last owner"); continue; } } - user_org.delete(&mut conn).await?; + member.delete(&mut conn).await?; } } } @@ -186,7 +190,7 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db Ok(()) } -pub struct PublicToken(String); +pub struct PublicToken(OrganizationId); #[rocket::async_trait] impl<'r> FromRequest<'r> for PublicToken { @@ -203,9 +207,8 @@ impl<'r> FromRequest<'r> for PublicToken { None => err_handler!("No access token provided"), }; // Check JWT token is valid and get device and user from it - let claims = match auth::decode_api_org(access_token) { - Ok(claims) => claims, - Err(_) => err_handler!("Invalid claim"), + let Ok(claims) = auth::decode_api_org(access_token) else { + err_handler!("Invalid claim") }; // Check if time is between claims.nbf and claims.exp let time_now = Utc::now().timestamp(); @@ -227,13 +230,12 @@ impl<'r> FromRequest<'r> for PublicToken { Outcome::Success(conn) => conn, _ => err_handler!("Error getting DB"), }; - let org_uuid = match claims.client_id.strip_prefix("organization.") { - Some(uuid) => uuid, - None => err_handler!("Malformed 
client_id"), + let Some(org_id) = claims.client_id.strip_prefix("organization.") else { + err_handler!("Malformed client_id") }; - let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, &conn).await { - Some(org_api_key) => org_api_key, - None => err_handler!("Invalid client_id"), + let org_id: OrganizationId = org_id.to_string().into(); + let Some(org_api_key) = OrganizationApiKey::find_by_org_uuid(&org_id, &conn).await else { + err_handler!("Invalid client_id") }; if org_api_key.org_uuid != claims.client_sub { err_handler!("Token not issued for this org"); diff --git a/src/api/core/sends.rs b/src/api/core/sends.rs index a7e5bcf0..e181d6ab 100644 --- a/src/api/core/sends.rs +++ b/src/api/core/sends.rs @@ -12,7 +12,7 @@ use crate::{ api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType}, auth::{ClientIp, Headers, Host}, db::{models::*, DbConn, DbPool}, - util::{NumberOrString, SafeString}, + util::NumberOrString, CONFIG, }; @@ -67,7 +67,7 @@ pub struct SendData { file_length: Option, // Used for key rotations - pub id: Option, + pub id: Option, } /// Enforces the `Disable Send` policy. A non-owner/admin user belonging to @@ -79,9 +79,9 @@ pub struct SendData { /// There is also a Vaultwarden-specific `sends_allowed` config setting that /// controls this policy globally. 
async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> EmptyResult { - let user_uuid = &headers.user.uuid; + let user_id = &headers.user.uuid; if !CONFIG.sends_allowed() - || OrgPolicy::is_applicable_to_user(user_uuid, OrgPolicyType::DisableSend, None, conn).await + || OrgPolicy::is_applicable_to_user(user_id, OrgPolicyType::DisableSend, None, conn).await { err!("Due to an Enterprise Policy, you are only able to delete an existing Send.") } @@ -95,9 +95,9 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em /// /// Ref: https://bitwarden.com/help/article/policies/#send-options async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult { - let user_uuid = &headers.user.uuid; + let user_id = &headers.user.uuid; let hide_email = data.hide_email.unwrap_or(false); - if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await { + if hide_email && OrgPolicy::is_hide_email_disabled(user_id, conn).await { err!( "Due to an Enterprise Policy, you are not allowed to hide your email address \ from recipients when creating or editing a Send." 
@@ -106,7 +106,7 @@ async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, c Ok(()) } -fn create_send(data: SendData, user_uuid: String) -> ApiResult { +fn create_send(data: SendData, user_id: UserId) -> ApiResult { let data_val = if data.r#type == SendType::Text as i32 { data.text } else if data.r#type == SendType::File as i32 { @@ -129,7 +129,7 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult { } let mut send = Send::new(data.r#type, data.name, data_str, data.key, data.deletion_date.naive_utc()); - send.user_uuid = Some(user_uuid); + send.user_uuid = Some(user_id); send.notes = data.notes; send.max_access_count = match data.max_access_count { Some(m) => Some(m.into_i32()?), @@ -157,18 +157,12 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json { })) } -#[get("/sends/")] -async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult { - let send = match Send::find_by_uuid(uuid, &mut conn).await { - Some(send) => send, - None => err!("Send not found"), - }; - - if send.user_uuid.as_ref() != Some(&headers.user.uuid) { - err!("Send is not owned by user") +#[get("/sends/")] +async fn get_send(send_id: SendId, headers: Headers, mut conn: DbConn) -> JsonResult { + match Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await { + Some(send) => Ok(Json(send.to_json())), + None => err!("Send not found", "Invalid send uuid or does not belong to user"), } - - Ok(Json(send.to_json())) } #[post("/sends", data = "")] @@ -255,7 +249,7 @@ async fn post_send_file(data: Form>, headers: Headers, mut conn: err!("Send content is not a file"); } - let file_id = crate::crypto::generate_send_id(); + let file_id = crate::crypto::generate_send_file_id(); let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid); let file_path = folder_path.join(&file_id); tokio::fs::create_dir_all(&folder_path).await?; @@ -330,7 +324,7 @@ async fn post_send_file_v2(data: Json, 
headers: Headers, mut conn: DbC let mut send = create_send(data, headers.user.uuid)?; - let file_id = crate::crypto::generate_send_id(); + let file_id = crate::crypto::generate_send_file_id(); let mut data_value: Value = serde_json::from_str(&send.data)?; if let Some(o) = data_value.as_object_mut() { @@ -352,16 +346,16 @@ async fn post_send_file_v2(data: Json, headers: Headers, mut conn: DbC #[derive(Deserialize)] #[allow(non_snake_case)] pub struct SendFileData { - id: String, + id: SendFileId, size: u64, fileName: String, } // https://github.com/bitwarden/server/blob/66f95d1c443490b653e5a15d32977e2f5a3f9e32/src/Api/Tools/Controllers/SendsController.cs#L250 -#[post("/sends//file/", format = "multipart/form-data", data = "")] +#[post("/sends//file/", format = "multipart/form-data", data = "")] async fn post_send_file_v2_data( - send_uuid: &str, - file_id: &str, + send_id: SendId, + file_id: SendFileId, data: Form>, headers: Headers, mut conn: DbConn, @@ -371,22 +365,14 @@ async fn post_send_file_v2_data( let mut data = data.into_inner(); - let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else { - err!("Send not found. Unable to save the file.") + let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else { + err!("Send not found. 
Unable to save the file.", "Invalid send uuid or does not belong to user.") }; if send.atype != SendType::File as i32 { err!("Send is not a file type send."); } - let Some(send_user_id) = &send.user_uuid else { - err!("Sends are only supported for users at the moment.") - }; - - if send_user_id != &headers.user.uuid { - err!("Send doesn't belong to user."); - } - let Ok(send_data) = serde_json::from_str::(&send.data) else { err!("Unable to decode send data as json.") }; @@ -416,7 +402,7 @@ async fn post_send_file_v2_data( err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size)); } - let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_uuid); + let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_id); let file_path = folder_path.join(file_id); // Check if the file already exists, if that is the case do not overwrite it @@ -456,9 +442,8 @@ async fn post_access( ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { - let mut send = match Send::find_by_access_id(access_id, &mut conn).await { - Some(s) => s, - None => err_code!(SEND_INACCESSIBLE_MSG, 404), + let Some(mut send) = Send::find_by_access_id(access_id, &mut conn).await else { + err_code!(SEND_INACCESSIBLE_MSG, 404) }; if let Some(max_access_count) = send.max_access_count { @@ -500,7 +485,7 @@ async fn post_access( UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&mut conn).await, - &String::from("00000000-0000-0000-0000-000000000000"), + &String::from("00000000-0000-0000-0000-000000000000").into(), &mut conn, ) .await; @@ -510,16 +495,15 @@ async fn post_access( #[post("/sends//access/file/", data = "")] async fn post_access_file( - send_id: &str, - file_id: &str, + send_id: SendId, + file_id: SendFileId, data: Json, host: Host, mut conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - let mut send = match Send::find_by_uuid(send_id, &mut conn).await { - Some(s) => s, - None => 
err_code!(SEND_INACCESSIBLE_MSG, 404), + let Some(mut send) = Send::find_by_uuid(&send_id, &mut conn).await else { + err_code!(SEND_INACCESSIBLE_MSG, 404) }; if let Some(max_access_count) = send.max_access_count { @@ -558,12 +542,12 @@ async fn post_access_file( UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&mut conn).await, - &String::from("00000000-0000-0000-0000-000000000000"), + &String::from("00000000-0000-0000-0000-000000000000").into(), &mut conn, ) .await; - let token_claims = crate::auth::generate_send_claims(send_id, file_id); + let token_claims = crate::auth::generate_send_claims(&send_id, &file_id); let token = crate::auth::encode_jwt(&token_claims); Ok(Json(json!({ "object": "send-fileDownload", @@ -573,7 +557,7 @@ async fn post_access_file( } #[get("/sends//?")] -async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Option { +async fn download_send(send_id: SendId, file_id: SendFileId, t: &str) -> Option { if let Ok(claims) = crate::auth::decode_send(t) { if claims.sub == format!("{send_id}/{file_id}") { return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok(); @@ -582,16 +566,21 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Opt None } -#[put("/sends/", data = "")] -async fn put_send(id: &str, data: Json, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +#[put("/sends/", data = "")] +async fn put_send( + send_id: SendId, + data: Json, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { enforce_disable_send_policy(&headers, &mut conn).await?; let data: SendData = data.into_inner(); enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?; - let mut send = match Send::find_by_uuid(id, &mut conn).await { - Some(s) => s, - None => err!("Send not found"), + let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else { + err!("Send not found", "Send 
send_id is invalid or does not belong to user") }; update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?; @@ -657,17 +646,12 @@ pub async fn update_send_from_data( Ok(()) } -#[delete("/sends/")] -async fn delete_send(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - let send = match Send::find_by_uuid(id, &mut conn).await { - Some(s) => s, - None => err!("Send not found"), +#[delete("/sends/")] +async fn delete_send(send_id: SendId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { + let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else { + err!("Send not found", "Invalid send uuid, or does not belong to user") }; - if send.user_uuid.as_ref() != Some(&headers.user.uuid) { - err!("Send is not owned by user") - } - send.delete(&mut conn).await?; nt.send_send_update( UpdateType::SyncSendDelete, @@ -681,19 +665,14 @@ async fn delete_send(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_ Ok(()) } -#[put("/sends//remove-password")] -async fn put_remove_password(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +#[put("/sends//remove-password")] +async fn put_remove_password(send_id: SendId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { enforce_disable_send_policy(&headers, &mut conn).await?; - let mut send = match Send::find_by_uuid(id, &mut conn).await { - Some(s) => s, - None => err!("Send not found"), + let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else { + err!("Send not found", "Invalid send uuid, or does not belong to user") }; - if send.user_uuid.as_ref() != Some(&headers.user.uuid) { - err!("Send is not owned by user") - } - send.set_password(None); send.save(&mut conn).await?; nt.send_send_update( diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs index 9d4bd480..3386f7d7 
100644 --- a/src/api/core/two_factor/authenticator.rs +++ b/src/api/core/two_factor/authenticator.rs @@ -7,7 +7,7 @@ use crate::{ auth::{ClientIp, Headers}, crypto, db::{ - models::{EventType, TwoFactor, TwoFactorType}, + models::{EventType, TwoFactor, TwoFactorType, UserId}, DbConn, }, util::NumberOrString, @@ -16,7 +16,7 @@ use crate::{ pub use crate::config::CONFIG; pub fn routes() -> Vec { - routes![generate_authenticator, activate_authenticator, activate_authenticator_put,] + routes![generate_authenticator, activate_authenticator, activate_authenticator_put, disable_authenticator] } #[post("/two-factor/get-authenticator", data = "")] @@ -95,7 +95,7 @@ async fn activate_authenticator_put(data: Json, headers } pub async fn validate_totp_code_str( - user_uuid: &str, + user_id: &UserId, totp_code: &str, secret: &str, ip: &ClientIp, @@ -105,11 +105,11 @@ pub async fn validate_totp_code_str( err!("TOTP code is not a number"); } - validate_totp_code(user_uuid, totp_code, secret, ip, conn).await + validate_totp_code(user_id, totp_code, secret, ip, conn).await } pub async fn validate_totp_code( - user_uuid: &str, + user_id: &UserId, totp_code: &str, secret: &str, ip: &ClientIp, @@ -117,16 +117,15 @@ pub async fn validate_totp_code( ) -> EmptyResult { use totp_lite::{totp_custom, Sha1}; - let decoded_secret = match BASE32.decode(secret.as_bytes()) { - Ok(s) => s, - Err(_) => err!("Invalid TOTP secret"), + let Ok(decoded_secret) = BASE32.decode(secret.as_bytes()) else { + err!("Invalid TOTP secret") }; - let mut twofactor = - match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn).await { - Some(tf) => tf, - _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()), - }; + let mut twofactor = match TwoFactor::find_by_user_and_type(user_id, TwoFactorType::Authenticator as i32, conn).await + { + Some(tf) => tf, + _ => TwoFactor::new(user_id.clone(), TwoFactorType::Authenticator, secret.to_string()), + 
}; // The amount of steps back and forward in time // Also check if we need to disable time drifted TOTP codes. @@ -176,3 +175,47 @@ pub async fn validate_totp_code( } ); } + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +struct DisableAuthenticatorData { + key: String, + master_password_hash: String, + r#type: NumberOrString, +} + +#[delete("/two-factor/authenticator", data = "")] +async fn disable_authenticator(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { + let user = headers.user; + let type_ = data.r#type.into_i32()?; + + if !user.check_valid_password(&data.master_password_hash) { + err!("Invalid password"); + } + + if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await { + if twofactor.data == data.key { + twofactor.delete(&mut conn).await?; + log_user_event( + EventType::UserDisabled2fa as i32, + &user.uuid, + headers.device.atype, + &headers.ip.ip, + &mut conn, + ) + .await; + } else { + err!(format!("TOTP key for user {} does not match recorded value, cannot deactivate", &user.email)); + } + } + + if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() { + super::enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?; + } + + Ok(Json(json!({ + "enabled": false, + "keys": type_, + "object": "twoFactorProvider" + }))) +} diff --git a/src/api/core/two_factor/duo.rs b/src/api/core/two_factor/duo.rs index 6de2935d..aa281ae7 100644 --- a/src/api/core/two_factor/duo.rs +++ b/src/api/core/two_factor/duo.rs @@ -11,7 +11,7 @@ use crate::{ auth::Headers, crypto, db::{ - models::{EventType, TwoFactor, TwoFactorType, User}, + models::{EventType, TwoFactor, TwoFactorType, User, UserId}, DbConn, }, error::MapResult, @@ -26,8 +26,8 @@ pub fn routes() -> Vec { #[derive(Serialize, Deserialize)] struct DuoData { host: String, // Duo API hostname - ik: String, // integration key - sk: String, // secret key + ik: String, // client id + sk: String, // 
client secret } impl DuoData { @@ -111,8 +111,8 @@ async fn get_duo(data: Json, headers: Headers, mut conn: DbCo json!({ "enabled": enabled, "host": data.host, - "secretKey": data.sk, - "integrationKey": data.ik, + "clientSecret": data.sk, + "clientId": data.ik, "object": "twoFactorDuo" }) } else { @@ -129,8 +129,8 @@ async fn get_duo(data: Json, headers: Headers, mut conn: DbCo #[serde(rename_all = "camelCase")] struct EnableDuoData { host: String, - secret_key: String, - integration_key: String, + client_secret: String, + client_id: String, master_password_hash: Option, otp: Option, } @@ -139,8 +139,8 @@ impl From for DuoData { fn from(d: EnableDuoData) -> Self { Self { host: d.host, - ik: d.integration_key, - sk: d.secret_key, + ik: d.client_id, + sk: d.client_secret, } } } @@ -151,7 +151,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool { st.is_empty() || s == DISABLED_MESSAGE_DEFAULT } - !empty_or_default(&data.host) && !empty_or_default(&data.secret_key) && !empty_or_default(&data.integration_key) + !empty_or_default(&data.host) && !empty_or_default(&data.client_secret) && !empty_or_default(&data.client_id) } #[post("/two-factor/duo", data = "")] @@ -186,8 +186,8 @@ async fn activate_duo(data: Json, headers: Headers, mut conn: DbC Ok(Json(json!({ "enabled": true, "host": data.host, - "secretKey": data.sk, - "integrationKey": data.ik, + "clientSecret": data.sk, + "clientId": data.ik, "object": "twoFactorDuo" }))) } @@ -228,13 +228,12 @@ const AUTH_PREFIX: &str = "AUTH"; const DUO_PREFIX: &str = "TX"; const APP_PREFIX: &str = "APP"; -async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus { +async fn get_user_duo_data(user_id: &UserId, conn: &mut DbConn) -> DuoStatus { let type_ = TwoFactorType::Duo as i32; // If the user doesn't have an entry, disabled - let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn).await { - Some(t) => t, - None => return DuoStatus::Disabled(DuoData::global().is_some()), + let 
Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, type_, conn).await else { + return DuoStatus::Disabled(DuoData::global().is_some()); }; // If the user has the required values, we use those @@ -333,14 +332,12 @@ fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) - err!("Prefixes don't match") } - let cookie_vec = match BASE64.decode(u_b64.as_bytes()) { - Ok(c) => c, - Err(_) => err!("Invalid Duo cookie encoding"), + let Ok(cookie_vec) = BASE64.decode(u_b64.as_bytes()) else { + err!("Invalid Duo cookie encoding") }; - let cookie = match String::from_utf8(cookie_vec) { - Ok(c) => c, - Err(_) => err!("Invalid Duo cookie encoding"), + let Ok(cookie) = String::from_utf8(cookie_vec) else { + err!("Invalid Duo cookie encoding") }; let cookie_split: Vec<&str> = cookie.split('|').collect(); diff --git a/src/api/core/two_factor/duo_oidc.rs b/src/api/core/two_factor/duo_oidc.rs index eb7fb329..e90d229f 100644 --- a/src/api/core/two_factor/duo_oidc.rs +++ b/src/api/core/two_factor/duo_oidc.rs @@ -10,7 +10,7 @@ use crate::{ api::{core::two_factor::duo::get_duo_keys_email, EmptyResult}, crypto, db::{ - models::{EventType, TwoFactorDuoContext}, + models::{DeviceId, EventType, TwoFactorDuoContext}, DbConn, DbPool, }, error::Error, @@ -379,7 +379,7 @@ fn make_callback_url(client_name: &str) -> Result { pub async fn get_duo_auth_url( email: &str, client_id: &str, - device_identifier: &String, + device_identifier: &DeviceId, conn: &mut DbConn, ) -> Result { let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?; @@ -417,7 +417,7 @@ pub async fn validate_duo_login( email: &str, two_factor_token: &str, client_id: &str, - device_identifier: &str, + device_identifier: &DeviceId, conn: &mut DbConn, ) -> EmptyResult { // Result supplied to us by clients in the form "|" diff --git a/src/api/core/two_factor/email.rs b/src/api/core/two_factor/email.rs index 293c0671..d6470a28 100644 --- a/src/api/core/two_factor/email.rs +++ 
b/src/api/core/two_factor/email.rs @@ -10,7 +10,7 @@ use crate::{ auth::Headers, crypto, db::{ - models::{EventType, TwoFactor, TwoFactorType, User}, + models::{EventType, TwoFactor, TwoFactorType, User, UserId}, DbConn, }, error::{Error, MapResult}, @@ -40,9 +40,8 @@ async fn send_email_login(data: Json, mut conn: DbConn) -> E use crate::db::models::User; // Get the user - let user = match User::find_by_mail(&data.email, &mut conn).await { - Some(user) => user, - None => err!("Username or password is incorrect. Try again."), + let Some(user) = User::find_by_mail(&data.email, &mut conn).await else { + err!("Username or password is incorrect. Try again.") }; // Check password @@ -60,10 +59,9 @@ async fn send_email_login(data: Json, mut conn: DbConn) -> E } /// Generate the token, save the data for later verification and send email to user -pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { +pub async fn send_token(user_id: &UserId, conn: &mut DbConn) -> EmptyResult { let type_ = TwoFactorType::Email as i32; - let mut twofactor = - TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await.map_res("Two factor not found")?; + let mut twofactor = TwoFactor::find_by_user_and_type(user_id, type_, conn).await.map_res("Two factor not found")?; let generated_token = crypto::generate_email_token(CONFIG.email_token_size()); @@ -174,9 +172,8 @@ async fn email(data: Json, headers: Headers, mut conn: DbConn) -> Jso let mut email_data = EmailTokenData::from_json(&twofactor.data)?; - let issued_token = match &email_data.last_token { - Some(t) => t, - _ => err!("No token available"), + let Some(issued_token) = &email_data.last_token else { + err!("No token available") }; if !crypto::ct_eq(issued_token, data.token) { @@ -200,19 +197,18 @@ async fn email(data: Json, headers: Headers, mut conn: DbConn) -> Jso } /// Validate the email code when used as TwoFactor token mechanism -pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, 
conn: &mut DbConn) -> EmptyResult { +pub async fn validate_email_code_str(user_id: &UserId, token: &str, data: &str, conn: &mut DbConn) -> EmptyResult { let mut email_data = EmailTokenData::from_json(data)?; - let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn) + let mut twofactor = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::Email as i32, conn) .await .map_res("Two factor not found")?; - let issued_token = match &email_data.last_token { - Some(t) => t, - _ => err!( + let Some(issued_token) = &email_data.last_token else { + err!( "No token available", ErrorEvent { event: EventType::UserFailedLogIn2fa } - ), + ) }; if !crypto::ct_eq(issued_token, token) { @@ -330,8 +326,8 @@ pub fn obscure_email(email: &str) -> String { format!("{}@{}", new_name, &domain) } -pub async fn find_and_activate_email_2fa(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { - if let Some(user) = User::find_by_uuid(user_uuid, conn).await { +pub async fn find_and_activate_email_2fa(user_id: &UserId, conn: &mut DbConn) -> EmptyResult { + if let Some(user) = User::find_by_uuid(user_id, conn).await { activate_email_2fa(&user, conn).await } else { err!("User not found!"); diff --git a/src/api/core/two_factor/mod.rs b/src/api/core/two_factor/mod.rs index e3795eb8..cfe0be86 100644 --- a/src/api/core/two_factor/mod.rs +++ b/src/api/core/two_factor/mod.rs @@ -85,9 +85,8 @@ async fn recover(data: Json, client_headers: ClientHeaders, mu use crate::db::models::User; // Get the user - let mut user = match User::find_by_mail(&data.email, &mut conn).await { - Some(user) => user, - None => err!("Username or password is incorrect. Try again."), + let Some(mut user) = User::find_by_mail(&data.email, &mut conn).await else { + err!("Username or password is incorrect. 
Try again.") }; // Check password @@ -174,17 +173,16 @@ async fn disable_twofactor_put(data: Json, headers: Header pub async fn enforce_2fa_policy( user: &User, - act_uuid: &str, + act_user_id: &UserId, device_type: i32, ip: &std::net::IpAddr, conn: &mut DbConn, ) -> EmptyResult { - for member in UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn) - .await - .into_iter() + for member in + Membership::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn).await.into_iter() { // Policy only applies to non-Owner/non-Admin members who have accepted joining the org - if member.atype < UserOrgType::Admin { + if member.atype < MembershipType::Admin { if CONFIG.mail_enabled() { let org = Organization::find_by_uuid(&member.org_uuid, conn).await.unwrap(); mail::send_2fa_removed_from_org(&user.email, &org.name).await?; @@ -197,7 +195,7 @@ pub async fn enforce_2fa_policy( EventType::OrganizationUserRevoked as i32, &member.uuid, &member.org_uuid, - act_uuid, + act_user_id, device_type, ip, conn, @@ -210,16 +208,16 @@ pub async fn enforce_2fa_policy( } pub async fn enforce_2fa_policy_for_org( - org_uuid: &str, - act_uuid: &str, + org_id: &OrganizationId, + act_user_id: &UserId, device_type: i32, ip: &std::net::IpAddr, conn: &mut DbConn, ) -> EmptyResult { - let org = Organization::find_by_uuid(org_uuid, conn).await.unwrap(); - for member in UserOrganization::find_confirmed_by_org(org_uuid, conn).await.into_iter() { + let org = Organization::find_by_uuid(org_id, conn).await.unwrap(); + for member in Membership::find_confirmed_by_org(org_id, conn).await.into_iter() { // Don't enforce the policy for Admins and Owners. 
- if member.atype < UserOrgType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() { + if member.atype < MembershipType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() { if CONFIG.mail_enabled() { let user = User::find_by_uuid(&member.user_uuid, conn).await.unwrap(); mail::send_2fa_removed_from_org(&user.email, &org.name).await?; @@ -231,8 +229,8 @@ pub async fn enforce_2fa_policy_for_org( log_event( EventType::OrganizationUserRevoked as i32, &member.uuid, - org_uuid, - act_uuid, + org_id, + act_user_id, device_type, ip, conn, diff --git a/src/api/core/two_factor/protected_actions.rs b/src/api/core/two_factor/protected_actions.rs index 1a1d59c8..5e4a65be 100644 --- a/src/api/core/two_factor/protected_actions.rs +++ b/src/api/core/two_factor/protected_actions.rs @@ -6,7 +6,7 @@ use crate::{ auth::Headers, crypto, db::{ - models::{TwoFactor, TwoFactorType}, + models::{TwoFactor, TwoFactorType, UserId}, DbConn, }, error::{Error, MapResult}, @@ -104,11 +104,11 @@ async fn verify_otp(data: Json, headers: Headers, mut con pub async fn validate_protected_action_otp( otp: &str, - user_uuid: &str, + user_id: &UserId, delete_if_valid: bool, conn: &mut DbConn, ) -> EmptyResult { - let pa = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::ProtectedActions as i32, conn) + let pa = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::ProtectedActions as i32, conn) .await .map_res("Protected action token not found, try sending the code again or restart the process")?; let mut pa_data = ProtectedActionData::from_json(&pa.data)?; diff --git a/src/api/core/two_factor/webauthn.rs b/src/api/core/two_factor/webauthn.rs index 52ca70c4..614c5df3 100644 --- a/src/api/core/two_factor/webauthn.rs +++ b/src/api/core/two_factor/webauthn.rs @@ -11,7 +11,7 @@ use crate::{ }, auth::Headers, db::{ - models::{EventType, TwoFactor, TwoFactorType}, + models::{EventType, TwoFactor, TwoFactorType, UserId}, DbConn, }, error::Error, @@ 
-148,7 +148,7 @@ async fn generate_webauthn_challenge(data: Json, headers: Hea )?; let type_ = TwoFactorType::WebauthnRegisterChallenge; - TwoFactor::new(user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?; + TwoFactor::new(user.uuid.clone(), type_, serde_json::to_string(&state)?).save(&mut conn).await?; let mut challenge_value = serde_json::to_value(challenge.public_key)?; challenge_value["status"] = "ok".into(); @@ -309,17 +309,16 @@ async fn delete_webauthn(data: Json, headers: Headers, mut conn: err!("Invalid password"); } - let mut tf = - match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await { - Some(tf) => tf, - None => err!("Webauthn data not found!"), - }; + let Some(mut tf) = + TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await + else { + err!("Webauthn data not found!") + }; let mut data: Vec = serde_json::from_str(&tf.data)?; - let item_pos = match data.iter().position(|r| r.id == id) { - Some(p) => p, - None => err!("Webauthn entry not found"), + let Some(item_pos) = data.iter().position(|r| r.id == id) else { + err!("Webauthn entry not found") }; let removed_item = data.remove(item_pos); @@ -353,20 +352,20 @@ async fn delete_webauthn(data: Json, headers: Headers, mut conn: } pub async fn get_webauthn_registrations( - user_uuid: &str, + user_id: &UserId, conn: &mut DbConn, ) -> Result<(bool, Vec), Error> { let type_ = TwoFactorType::Webauthn as i32; - match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await { + match TwoFactor::find_by_user_and_type(user_id, type_, conn).await { Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)), None => Ok((false, Vec::new())), // If no data, return empty list } } -pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> JsonResult { +pub async fn generate_webauthn_login(user_id: &UserId, conn: &mut DbConn) -> JsonResult { // Load saved credentials let 
creds: Vec = - get_webauthn_registrations(user_uuid, conn).await?.1.into_iter().map(|r| r.credential).collect(); + get_webauthn_registrations(user_id, conn).await?.1.into_iter().map(|r| r.credential).collect(); if creds.is_empty() { err!("No Webauthn devices registered") @@ -377,7 +376,7 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> Json let (response, state) = WebauthnConfig::load().generate_challenge_authenticate_options(creds, Some(ext))?; // Save the challenge state for later validation - TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?) + TwoFactor::new(user_id.clone(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?) .save(conn) .await?; @@ -385,9 +384,9 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> Json Ok(Json(serde_json::to_value(response.public_key)?)) } -pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut DbConn) -> EmptyResult { +pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &mut DbConn) -> EmptyResult { let type_ = TwoFactorType::WebauthnLoginChallenge as i32; - let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await { + let state = match TwoFactor::find_by_user_and_type(user_id, type_, conn).await { Some(tf) => { let state: AuthenticationState = serde_json::from_str(&tf.data)?; tf.delete(conn).await?; @@ -404,7 +403,7 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut let rsp: PublicKeyCredentialCopy = serde_json::from_str(response)?; let rsp: PublicKeyCredential = rsp.into(); - let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1; + let mut registrations = get_webauthn_registrations(user_id, conn).await?.1; // If the credential we received is migrated from U2F, enable the U2F compatibility //let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == 
rsp.raw_id.0); @@ -414,7 +413,7 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut if ®.credential.cred_id == cred_id { reg.credential.counter = auth_data.counter; - TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(®istrations)?) + TwoFactor::new(user_id.clone(), TwoFactorType::Webauthn, serde_json::to_string(®istrations)?) .save(conn) .await?; return Ok(()); diff --git a/src/api/core/two_factor/yubikey.rs b/src/api/core/two_factor/yubikey.rs index b2940353..a6d9898d 100644 --- a/src/api/core/two_factor/yubikey.rs +++ b/src/api/core/two_factor/yubikey.rs @@ -92,10 +92,10 @@ async fn generate_yubikey(data: Json, headers: Headers, mut c data.validate(&user, false, &mut conn).await?; - let user_uuid = &user.uuid; + let user_id = &user.uuid; let yubikey_type = TwoFactorType::YubiKey as i32; - let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &mut conn).await; + let r = TwoFactor::find_by_user_and_type(user_id, yubikey_type, &mut conn).await; if let Some(r) = r { let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?; diff --git a/src/api/icons.rs b/src/api/icons.rs index 6afbaa9f..0b437d53 100644 --- a/src/api/icons.rs +++ b/src/api/icons.rs @@ -19,7 +19,7 @@ use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, }; -use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer}; +use html5gum::{Emitter, HtmlString, Readable, StringReader, Tokenizer}; use crate::{ error::Error, @@ -63,6 +63,9 @@ static CLIENT: Lazy = Lazy::new(|| { // Build Regex only once since this takes a lot of time. static ICON_SIZE_REGEX: Lazy = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap()); +// The function name `icon_external` is checked in the `on_response` function in `AppHeaders` +// It is used to prevent sending a specific header which breaks icon downloads. 
+// If this function needs to be renamed, also adjust the code in `util.rs` #[get("//icon.png")] fn icon_external(domain: &str) -> Option { if !is_valid_domain(domain) { @@ -261,11 +264,7 @@ impl Icon { } } -fn get_favicons_node( - dom: InfallibleTokenizer, FaviconEmitter>, - icons: &mut Vec, - url: &url::Url, -) { +fn get_favicons_node(dom: Tokenizer, FaviconEmitter>, icons: &mut Vec, url: &url::Url) { const TAG_LINK: &[u8] = b"link"; const TAG_BASE: &[u8] = b"base"; const TAG_HEAD: &[u8] = b"head"; @@ -274,7 +273,7 @@ fn get_favicons_node( let mut base_url = url.clone(); let mut icon_tags: Vec = Vec::new(); - for token in dom { + for Ok(token) in dom { let tag_name: &[u8] = &token.tag.name; match tag_name { TAG_LINK => { @@ -295,9 +294,7 @@ fn get_favicons_node( TAG_HEAD if token.closing => { break; } - _ => { - continue; - } + _ => {} } } @@ -401,7 +398,7 @@ async fn get_icon_url(domain: &str) -> Result { // 384KB should be more than enough for the HTML, though as we only really need the HTML header. let limited_reader = stream_to_bytes_limit(content, 384 * 1024).await?.to_vec(); - let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default()).infallible(); + let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default()); get_favicons_node(dom, &mut iconlist, &url); } else { // Add the default favicon.ico to the list with just the given domain @@ -662,7 +659,7 @@ impl reqwest::cookie::CookieStore for Jar { /// The FaviconEmitter is using an optimized version of the DefaultEmitter. /// This prevents emitting tags like comments, doctype and also strings between the tags. /// But it will also only emit the tags we need and only if they have the correct attributes -/// Therefor parsing the HTML content is faster. +/// Therefore parsing the HTML content is faster. 
use std::collections::BTreeMap; #[derive(Default)] diff --git a/src/api/identity.rs b/src/api/identity.rs index 02c8529c..03923456 100644 --- a/src/api/identity.rs +++ b/src/api/identity.rs @@ -31,7 +31,7 @@ pub fn routes() -> Vec { async fn login(data: Form, client_header: ClientHeaders, mut conn: DbConn) -> JsonResult { let data: ConnectData = data.into_inner(); - let mut user_uuid: Option = None; + let mut user_id: Option = None; let login_result = match data.grant_type.as_ref() { "refresh_token" => { @@ -48,7 +48,7 @@ async fn login(data: Form, client_header: ClientHeaders, mut conn: _check_is_some(&data.device_name, "device_name cannot be blank")?; _check_is_some(&data.device_type, "device_type cannot be blank")?; - _password_login(data, &mut user_uuid, &mut conn, &client_header.ip).await + _password_login(data, &mut user_id, &mut conn, &client_header.ip).await } "client_credentials" => { _check_is_some(&data.client_id, "client_id cannot be blank")?; @@ -59,17 +59,17 @@ async fn login(data: Form, client_header: ClientHeaders, mut conn: _check_is_some(&data.device_name, "device_name cannot be blank")?; _check_is_some(&data.device_type, "device_type cannot be blank")?; - _api_key_login(data, &mut user_uuid, &mut conn, &client_header.ip).await + _api_key_login(data, &mut user_id, &mut conn, &client_header.ip).await } t => err!("Invalid type", t), }; - if let Some(user_uuid) = user_uuid { + if let Some(user_id) = user_id { match &login_result { Ok(_) => { log_user_event( EventType::UserLoggedIn as i32, - &user_uuid, + &user_id, client_header.device_type, &client_header.ip.ip, &mut conn, @@ -80,7 +80,7 @@ async fn login(data: Form, client_header: ClientHeaders, mut conn: if let Some(ev) = e.get_event() { log_user_event( ev.event as i32, - &user_uuid, + &user_id, client_header.device_type, &client_header.ip.ip, &mut conn, @@ -111,7 +111,7 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult { // Because this might get used in the future, 
and is add by the Bitwarden Server, lets keep it, but then commented out // See: https://github.com/dani-garcia/vaultwarden/issues/4156 // --- - // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await; + // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await; let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec); device.save(conn).await?; @@ -141,7 +141,7 @@ struct MasterPasswordPolicy { async fn _password_login( data: ConnectData, - user_uuid: &mut Option, + user_id: &mut Option, conn: &mut DbConn, ip: &ClientIp, ) -> JsonResult { @@ -157,13 +157,12 @@ async fn _password_login( // Get the user let username = data.username.as_ref().unwrap().trim(); - let mut user = match User::find_by_mail(username, conn).await { - Some(user) => user, - None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)), + let Some(mut user) = User::find_by_mail(username, conn).await else { + err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)) }; - // Set the user_uuid here to be passed back used for event logging. - *user_uuid = Some(user.uuid.clone()); + // Set the user_id here to be passed back used for event logging. + *user_id = Some(user.uuid.clone()); // Check if the user is disabled if !user.enabled { @@ -179,8 +178,8 @@ async fn _password_login( let password = data.password.as_ref().unwrap(); // If we get an auth request, we don't check the user's password, but the access code of the auth request - if let Some(ref auth_request_uuid) = data.auth_request { - let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_uuid.as_str(), conn).await else { + if let Some(ref auth_request_id) = data.auth_request { + let Some(auth_request) = AuthRequest::find_by_uuid_and_user(auth_request_id, &user.uuid, conn).await else { err!( "Auth request not found. Try again.", format!("IP: {}. 
Username: {}.", ip.ip, username), @@ -291,7 +290,7 @@ async fn _password_login( // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out // See: https://github.com/dani-garcia/vaultwarden/issues/4156 // --- - // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await; + // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await; let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec); device.save(conn).await?; @@ -359,7 +358,7 @@ async fn _password_login( async fn _api_key_login( data: ConnectData, - user_uuid: &mut Option, + user_id: &mut Option, conn: &mut DbConn, ip: &ClientIp, ) -> JsonResult { @@ -368,7 +367,7 @@ async fn _api_key_login( // Validate scope match data.scope.as_ref().unwrap().as_ref() { - "api" => _user_api_key_login(data, user_uuid, conn, ip).await, + "api" => _user_api_key_login(data, user_id, conn, ip).await, "api.organization" => _organization_api_key_login(data, conn, ip).await, _ => err!("Scope not supported"), } @@ -376,23 +375,22 @@ async fn _api_key_login( async fn _user_api_key_login( data: ConnectData, - user_uuid: &mut Option, + user_id: &mut Option, conn: &mut DbConn, ip: &ClientIp, ) -> JsonResult { // Get the user via the client_id let client_id = data.client_id.as_ref().unwrap(); - let client_user_uuid = match client_id.strip_prefix("user.") { - Some(uuid) => uuid, - None => err!("Malformed client_id", format!("IP: {}.", ip.ip)), + let Some(client_user_id) = client_id.strip_prefix("user.") else { + err!("Malformed client_id", format!("IP: {}.", ip.ip)) }; - let user = match User::find_by_uuid(client_user_uuid, conn).await { - Some(user) => user, - None => err!("Invalid client_id", format!("IP: {}.", ip.ip)), + let client_user_id: UserId = client_user_id.into(); + let Some(user) = User::find_by_uuid(&client_user_id, conn).await else { + err!("Invalid client_id", format!("IP: {}.", ip.ip)) }; - // Set the user_uuid 
here to be passed back used for event logging. - *user_uuid = Some(user.uuid.clone()); + // Set the user_id here to be passed back used for event logging. + *user_id = Some(user.uuid.clone()); // Check if the user is disabled if !user.enabled { @@ -442,7 +440,7 @@ async fn _user_api_key_login( // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out // See: https://github.com/dani-garcia/vaultwarden/issues/4156 // --- - // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await; + // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await; let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec); device.save(conn).await?; @@ -471,13 +469,12 @@ async fn _user_api_key_login( async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &ClientIp) -> JsonResult { // Get the org via the client_id let client_id = data.client_id.as_ref().unwrap(); - let org_uuid = match client_id.strip_prefix("organization.") { - Some(uuid) => uuid, - None => err!("Malformed client_id", format!("IP: {}.", ip.ip)), + let Some(org_id) = client_id.strip_prefix("organization.") else { + err!("Malformed client_id", format!("IP: {}.", ip.ip)) }; - let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, conn).await { - Some(org_api_key) => org_api_key, - None => err!("Invalid client_id", format!("IP: {}.", ip.ip)), + let org_id: OrganizationId = org_id.to_string().into(); + let Some(org_api_key) = OrganizationApiKey::find_by_org_uuid(&org_id, conn).await else { + err!("Invalid client_id", format!("IP: {}.", ip.ip)) }; // Check API key. 
@@ -618,7 +615,7 @@ fn _selected_data(tf: Option) -> ApiResult { async fn _json_err_twofactor( providers: &[i32], - user_uuid: &str, + user_id: &UserId, data: &ConnectData, conn: &mut DbConn, ) -> ApiResult { @@ -639,12 +636,12 @@ async fn _json_err_twofactor( Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ } Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => { - let request = webauthn::generate_webauthn_login(user_uuid, conn).await?; + let request = webauthn::generate_webauthn_login(user_id, conn).await?; result["TwoFactorProviders2"][provider.to_string()] = request.0; } Some(TwoFactorType::Duo) => { - let email = match User::find_by_uuid(user_uuid, conn).await { + let email = match User::find_by_uuid(user_id, conn).await { Some(u) => u.email, None => err!("User does not exist"), }; @@ -676,9 +673,8 @@ async fn _json_err_twofactor( } Some(tf_type @ TwoFactorType::YubiKey) => { - let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await { - Some(tf) => tf, - None => err!("No YubiKey devices registered"), + let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, tf_type as i32, conn).await else { + err!("No YubiKey devices registered") }; let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?; @@ -689,14 +685,13 @@ async fn _json_err_twofactor( } Some(tf_type @ TwoFactorType::Email) => { - let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await { - Some(tf) => tf, - None => err!("No twofactor email registered"), + let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, tf_type as i32, conn).await else { + err!("No twofactor email registered") }; // Send email immediately if email is the only 2FA option if providers.len() == 1 { - email::send_token(user_uuid, conn).await? + email::send_token(user_id, conn).await? 
} let email_data = email::EmailTokenData::from_json(&twofactor.data)?; @@ -806,7 +801,7 @@ struct ConnectData { #[field(name = uncased("device_identifier"))] #[field(name = uncased("deviceidentifier"))] - device_identifier: Option, + device_identifier: Option, #[field(name = uncased("device_name"))] #[field(name = uncased("devicename"))] device_name: Option, @@ -829,7 +824,7 @@ struct ConnectData { #[field(name = uncased("twofactorremember"))] two_factor_remember: Option, #[field(name = uncased("authrequest"))] - auth_request: Option, + auth_request: Option, } fn _check_is_some(value: &Option, msg: &str) -> EmptyResult { diff --git a/src/api/notifications.rs b/src/api/notifications.rs index 8c925e37..de97be6f 100644 --- a/src/api/notifications.rs +++ b/src/api/notifications.rs @@ -10,7 +10,7 @@ use rocket_ws::{Message, WebSocket}; use crate::{ auth::{ClientIp, WsAccessTokenHeader}, db::{ - models::{Cipher, Folder, Send as DbSend, User}, + models::{AuthRequestId, Cipher, CollectionId, DeviceId, Folder, Send as DbSend, User, UserId}, DbConn, }, Error, CONFIG, @@ -53,13 +53,13 @@ struct WsAccessToken { struct WSEntryMapGuard { users: Arc, - user_uuid: String, + user_uuid: UserId, entry_uuid: uuid::Uuid, addr: IpAddr, } impl WSEntryMapGuard { - fn new(users: Arc, user_uuid: String, entry_uuid: uuid::Uuid, addr: IpAddr) -> Self { + fn new(users: Arc, user_uuid: UserId, entry_uuid: uuid::Uuid, addr: IpAddr) -> Self { Self { users, user_uuid, @@ -72,7 +72,7 @@ impl WSEntryMapGuard { impl Drop for WSEntryMapGuard { fn drop(&mut self) { info!("Closing WS connection from {}", self.addr); - if let Some(mut entry) = self.users.map.get_mut(&self.user_uuid) { + if let Some(mut entry) = self.users.map.get_mut(self.user_uuid.as_ref()) { entry.retain(|(uuid, _)| uuid != &self.entry_uuid); } } @@ -101,6 +101,7 @@ impl Drop for WSAnonymousEntryMapGuard { } } +#[allow(tail_expr_drop_order)] #[get("/hub?")] fn websockets_hub<'r>( ws: WebSocket, @@ -129,7 +130,7 @@ fn 
websockets_hub<'r>( // Add a channel to send messages to this client to the map let entry_uuid = uuid::Uuid::new_v4(); let (tx, rx) = tokio::sync::mpsc::channel::(100); - users.map.entry(claims.sub.clone()).or_default().push((entry_uuid, tx)); + users.map.entry(claims.sub.to_string()).or_default().push((entry_uuid, tx)); // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map (rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid, addr)) @@ -156,7 +157,6 @@ fn websockets_hub<'r>( if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) { yield Message::binary(INITIAL_RESPONSE); - continue; } } @@ -186,6 +186,7 @@ fn websockets_hub<'r>( }) } +#[allow(tail_expr_drop_order)] #[get("/anonymous-hub?")] fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> Result { let addr = ip.ip; @@ -223,7 +224,6 @@ fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> R if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) { yield Message::binary(INITIAL_RESPONSE); - continue; } } @@ -290,7 +290,7 @@ fn serialize(val: Value) -> Vec { fn serialize_date(date: NaiveDateTime) -> Value { let seconds: i64 = date.and_utc().timestamp(); let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into(); - let timestamp = nanos << 34 | seconds; + let timestamp = (nanos << 34) | seconds; let bs = timestamp.to_be_bytes(); @@ -328,8 +328,8 @@ pub struct WebSocketUsers { } impl WebSocketUsers { - async fn send_update(&self, user_uuid: &str, data: &[u8]) { - if let Some(user) = self.map.get(user_uuid).map(|v| v.clone()) { + async fn send_update(&self, user_id: &UserId, data: &[u8]) { + if let Some(user) = self.map.get(user_id.as_ref()).map(|v| v.clone()) { for (_, sender) in user.iter() { if let Err(e) = sender.send(Message::binary(data)).await { error!("Error sending WS update {e}"); @@ -345,7 +345,7 @@ impl WebSocketUsers { return; } let data = create_update( - 
vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))], + vec![("UserId".into(), user.uuid.to_string().into()), ("Date".into(), serialize_date(user.updated_at))], ut, None, ); @@ -359,15 +359,15 @@ impl WebSocketUsers { } } - pub async fn send_logout(&self, user: &User, acting_device_uuid: Option) { + pub async fn send_logout(&self, user: &User, acting_device_id: Option) { // Skip any processing if both WebSockets and Push are not active if *NOTIFICATIONS_DISABLED { return; } let data = create_update( - vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))], + vec![("UserId".into(), user.uuid.to_string().into()), ("Date".into(), serialize_date(user.updated_at))], UpdateType::LogOut, - acting_device_uuid.clone(), + acting_device_id.clone(), ); if CONFIG.enable_websocket() { @@ -375,7 +375,7 @@ impl WebSocketUsers { } if CONFIG.push_enabled() { - push_logout(user, acting_device_uuid); + push_logout(user, acting_device_id.clone()); } } @@ -383,7 +383,7 @@ impl WebSocketUsers { &self, ut: UpdateType, folder: &Folder, - acting_device_uuid: &String, + acting_device_id: &DeviceId, conn: &mut DbConn, ) { // Skip any processing if both WebSockets and Push are not active @@ -392,12 +392,12 @@ impl WebSocketUsers { } let data = create_update( vec![ - ("Id".into(), folder.uuid.clone().into()), - ("UserId".into(), folder.user_uuid.clone().into()), + ("Id".into(), folder.uuid.to_string().into()), + ("UserId".into(), folder.user_uuid.to_string().into()), ("RevisionDate".into(), serialize_date(folder.updated_at)), ], ut, - Some(acting_device_uuid.into()), + Some(acting_device_id.clone()), ); if CONFIG.enable_websocket() { @@ -405,7 +405,7 @@ impl WebSocketUsers { } if CONFIG.push_enabled() { - push_folder_update(ut, folder, acting_device_uuid, conn).await; + push_folder_update(ut, folder, acting_device_id, conn).await; } } @@ -413,48 +413,48 @@ impl WebSocketUsers { &self, ut: UpdateType, 
cipher: &Cipher, - user_uuids: &[String], - acting_device_uuid: &String, - collection_uuids: Option>, + user_ids: &[UserId], + acting_device_id: &DeviceId, + collection_uuids: Option>, conn: &mut DbConn, ) { // Skip any processing if both WebSockets and Push are not active if *NOTIFICATIONS_DISABLED { return; } - let org_uuid = convert_option(cipher.organization_uuid.clone()); + let org_id = convert_option(cipher.organization_uuid.as_deref()); // Depending if there are collections provided or not, we need to have different values for the following variables. // The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change. - let (user_uuid, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids { + let (user_id, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids { ( Value::Nil, - Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::>()), + Value::Array(collection_uuids.into_iter().map(|v| v.to_string().into()).collect::>()), serialize_date(Utc::now().naive_utc()), ) } else { - (convert_option(cipher.user_uuid.clone()), Value::Nil, serialize_date(cipher.updated_at)) + (convert_option(cipher.user_uuid.as_deref()), Value::Nil, serialize_date(cipher.updated_at)) }; let data = create_update( vec![ - ("Id".into(), cipher.uuid.clone().into()), - ("UserId".into(), user_uuid), - ("OrganizationId".into(), org_uuid), + ("Id".into(), cipher.uuid.to_string().into()), + ("UserId".into(), user_id), + ("OrganizationId".into(), org_id), ("CollectionIds".into(), collection_uuids), ("RevisionDate".into(), revision_date), ], ut, - Some(acting_device_uuid.into()), + Some(acting_device_id.clone()), ); if CONFIG.enable_websocket() { - for uuid in user_uuids { + for uuid in user_ids { self.send_update(uuid, &data).await; } } - if CONFIG.push_enabled() && user_uuids.len() == 1 { - push_cipher_update(ut, cipher, acting_device_uuid, conn).await; + if 
CONFIG.push_enabled() && user_ids.len() == 1 { + push_cipher_update(ut, cipher, acting_device_id, conn).await; } } @@ -462,20 +462,20 @@ impl WebSocketUsers { &self, ut: UpdateType, send: &DbSend, - user_uuids: &[String], - acting_device_uuid: &String, + user_ids: &[UserId], + acting_device_id: &DeviceId, conn: &mut DbConn, ) { // Skip any processing if both WebSockets and Push are not active if *NOTIFICATIONS_DISABLED { return; } - let user_uuid = convert_option(send.user_uuid.clone()); + let user_id = convert_option(send.user_uuid.as_deref()); let data = create_update( vec![ - ("Id".into(), send.uuid.clone().into()), - ("UserId".into(), user_uuid), + ("Id".into(), send.uuid.to_string().into()), + ("UserId".into(), user_id), ("RevisionDate".into(), serialize_date(send.revision_date)), ], ut, @@ -483,20 +483,20 @@ impl WebSocketUsers { ); if CONFIG.enable_websocket() { - for uuid in user_uuids { + for uuid in user_ids { self.send_update(uuid, &data).await; } } - if CONFIG.push_enabled() && user_uuids.len() == 1 { - push_send_update(ut, send, acting_device_uuid, conn).await; + if CONFIG.push_enabled() && user_ids.len() == 1 { + push_send_update(ut, send, acting_device_id, conn).await; } } pub async fn send_auth_request( &self, - user_uuid: &String, + user_id: &UserId, auth_request_uuid: &String, - acting_device_uuid: &String, + acting_device_id: &DeviceId, conn: &mut DbConn, ) { // Skip any processing if both WebSockets and Push are not active @@ -504,24 +504,24 @@ impl WebSocketUsers { return; } let data = create_update( - vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())], + vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_id.to_string().into())], UpdateType::AuthRequest, - Some(acting_device_uuid.to_string()), + Some(acting_device_id.clone()), ); if CONFIG.enable_websocket() { - self.send_update(user_uuid, &data).await; + self.send_update(user_id, &data).await; } if CONFIG.push_enabled() 
{ - push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await; + push_auth_request(user_id.clone(), auth_request_uuid.to_string(), conn).await; } } pub async fn send_auth_response( &self, - user_uuid: &String, - auth_response_uuid: &str, - approving_device_uuid: String, + user_id: &UserId, + auth_request_id: &AuthRequestId, + approving_device_id: &DeviceId, conn: &mut DbConn, ) { // Skip any processing if both WebSockets and Push are not active @@ -529,17 +529,16 @@ impl WebSocketUsers { return; } let data = create_update( - vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())], + vec![("Id".into(), auth_request_id.to_string().into()), ("UserId".into(), user_id.to_string().into())], UpdateType::AuthRequestResponse, - approving_device_uuid.clone().into(), + Some(approving_device_id.clone()), ); if CONFIG.enable_websocket() { - self.send_update(auth_response_uuid, &data).await; + self.send_update(user_id, &data).await; } if CONFIG.push_enabled() { - push_auth_response(user_uuid.to_string(), auth_response_uuid.to_string(), approving_device_uuid, conn) - .await; + push_auth_response(user_id, auth_request_id, approving_device_id, conn).await; } } } @@ -558,16 +557,16 @@ impl AnonymousWebSocketSubscriptions { } } - pub async fn send_auth_response(&self, user_uuid: &String, auth_response_uuid: &str) { + pub async fn send_auth_response(&self, user_id: &UserId, auth_request_id: &AuthRequestId) { if !CONFIG.enable_websocket() { return; } let data = create_anonymous_update( - vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())], + vec![("Id".into(), auth_request_id.to_string().into()), ("UserId".into(), user_id.to_string().into())], UpdateType::AuthRequestResponse, - user_uuid.to_string(), + user_id.clone(), ); - self.send_update(auth_response_uuid, &data).await; + self.send_update(auth_request_id, &data).await; } } @@ -579,14 +578,14 @@ impl 
AnonymousWebSocketSubscriptions { "ReceiveMessage", // Target [ // Arguments { - "ContextId": acting_device_uuid || Nil, + "ContextId": acting_device_id || Nil, "Type": ut as i32, "Payload": {} } ] ] */ -fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uuid: Option) -> Vec { +fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_id: Option) -> Vec { use rmpv::Value as V; let value = V::Array(vec![ @@ -595,7 +594,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uui V::Nil, "ReceiveMessage".into(), V::Array(vec![V::Map(vec![ - ("ContextId".into(), acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| V::Nil)), + ("ContextId".into(), acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| V::Nil)), ("Type".into(), (ut as i32).into()), ("Payload".into(), payload.into()), ])]), @@ -604,7 +603,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uui serialize(value) } -fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: String) -> Vec { +fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: UserId) -> Vec { use rmpv::Value as V; let value = V::Array(vec![ @@ -615,7 +614,7 @@ fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id V::Array(vec![V::Map(vec![ ("Type".into(), (ut as i32).into()), ("Payload".into(), payload.into()), - ("UserId".into(), user_id.into()), + ("UserId".into(), user_id.to_string().into()), ])]), ]); diff --git a/src/api/push.rs b/src/api/push.rs index d6abfc49..e3ea0bcb 100644 --- a/src/api/push.rs +++ b/src/api/push.rs @@ -7,7 +7,7 @@ use tokio::sync::RwLock; use crate::{ api::{ApiResult, EmptyResult, UpdateType}, - db::models::{Cipher, Device, Folder, Send, User}, + db::models::{AuthRequestId, Cipher, Device, DeviceId, Folder, Send, User, UserId}, http_client::make_http_request, util::format_date, CONFIG, @@ -126,15 +126,15 @@ pub async fn 
register_push_device(device: &mut Device, conn: &mut crate::db::DbC Ok(()) } -pub async fn unregister_push_device(push_uuid: Option) -> EmptyResult { - if !CONFIG.push_enabled() || push_uuid.is_none() { +pub async fn unregister_push_device(push_id: Option) -> EmptyResult { + if !CONFIG.push_enabled() || push_id.is_none() { return Ok(()); } let auth_push_token = get_auth_push_token().await?; let auth_header = format!("Bearer {}", &auth_push_token); - match make_http_request(Method::DELETE, &(CONFIG.push_relay_uri() + "/push/" + &push_uuid.unwrap()))? + match make_http_request(Method::DELETE, &(CONFIG.push_relay_uri() + "/push/" + &push_id.unwrap()))? .header(AUTHORIZATION, auth_header) .send() .await @@ -148,27 +148,24 @@ pub async fn unregister_push_device(push_uuid: Option) -> EmptyResult { pub async fn push_cipher_update( ut: UpdateType, cipher: &Cipher, - acting_device_uuid: &String, + acting_device_id: &DeviceId, conn: &mut crate::db::DbConn, ) { // We shouldn't send a push notification on cipher update if the cipher belongs to an organization, this isn't implemented in the upstream server too. 
if cipher.organization_uuid.is_some() { return; }; - let user_uuid = match &cipher.user_uuid { - Some(c) => c, - None => { - debug!("Cipher has no uuid"); - return; - } + let Some(user_id) = &cipher.user_uuid else { + debug!("Cipher has no uuid"); + return; }; - if Device::check_user_has_push_device(user_uuid, conn).await { + if Device::check_user_has_push_device(user_id, conn).await { send_to_push_relay(json!({ - "userId": user_uuid, + "userId": user_id, "organizationId": (), - "deviceId": acting_device_uuid, - "identifier": acting_device_uuid, + "deviceId": acting_device_id, + "identifier": acting_device_id, "type": ut as i32, "payload": { "Id": cipher.uuid, @@ -181,14 +178,14 @@ pub async fn push_cipher_update( } } -pub fn push_logout(user: &User, acting_device_uuid: Option) { - let acting_device_uuid: Value = acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| Value::Null); +pub fn push_logout(user: &User, acting_device_id: Option) { + let acting_device_id: Value = acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| Value::Null); tokio::task::spawn(send_to_push_relay(json!({ "userId": user.uuid, "organizationId": (), - "deviceId": acting_device_uuid, - "identifier": acting_device_uuid, + "deviceId": acting_device_id, + "identifier": acting_device_id, "type": UpdateType::LogOut as i32, "payload": { "UserId": user.uuid, @@ -214,15 +211,15 @@ pub fn push_user_update(ut: UpdateType, user: &User) { pub async fn push_folder_update( ut: UpdateType, folder: &Folder, - acting_device_uuid: &String, + acting_device_id: &DeviceId, conn: &mut crate::db::DbConn, ) { if Device::check_user_has_push_device(&folder.user_uuid, conn).await { tokio::task::spawn(send_to_push_relay(json!({ "userId": folder.user_uuid, "organizationId": (), - "deviceId": acting_device_uuid, - "identifier": acting_device_uuid, + "deviceId": acting_device_id, + "identifier": acting_device_id, "type": ut as i32, "payload": { "Id": folder.uuid, @@ -233,14 +230,14 @@ pub async fn 
push_folder_update( } } -pub async fn push_send_update(ut: UpdateType, send: &Send, acting_device_uuid: &String, conn: &mut crate::db::DbConn) { +pub async fn push_send_update(ut: UpdateType, send: &Send, acting_device_id: &DeviceId, conn: &mut crate::db::DbConn) { if let Some(s) = &send.user_uuid { if Device::check_user_has_push_device(s, conn).await { tokio::task::spawn(send_to_push_relay(json!({ "userId": send.user_uuid, "organizationId": (), - "deviceId": acting_device_uuid, - "identifier": acting_device_uuid, + "deviceId": acting_device_id, + "identifier": acting_device_id, "type": ut as i32, "payload": { "Id": send.uuid, @@ -287,38 +284,38 @@ async fn send_to_push_relay(notification_data: Value) { }; } -pub async fn push_auth_request(user_uuid: String, auth_request_uuid: String, conn: &mut crate::db::DbConn) { - if Device::check_user_has_push_device(user_uuid.as_str(), conn).await { +pub async fn push_auth_request(user_id: UserId, auth_request_id: String, conn: &mut crate::db::DbConn) { + if Device::check_user_has_push_device(&user_id, conn).await { tokio::task::spawn(send_to_push_relay(json!({ - "userId": user_uuid, + "userId": user_id, "organizationId": (), "deviceId": null, "identifier": null, "type": UpdateType::AuthRequest as i32, "payload": { - "Id": auth_request_uuid, - "UserId": user_uuid, + "Id": auth_request_id, + "UserId": user_id, } }))); } } pub async fn push_auth_response( - user_uuid: String, - auth_request_uuid: String, - approving_device_uuid: String, + user_id: &UserId, + auth_request_id: &AuthRequestId, + approving_device_id: &DeviceId, conn: &mut crate::db::DbConn, ) { - if Device::check_user_has_push_device(user_uuid.as_str(), conn).await { + if Device::check_user_has_push_device(user_id, conn).await { tokio::task::spawn(send_to_push_relay(json!({ - "userId": user_uuid, + "userId": user_id, "organizationId": (), - "deviceId": approving_device_uuid, - "identifier": approving_device_uuid, + "deviceId": approving_device_id, + "identifier": 
approving_device_id, "type": UpdateType::AuthRequestResponse as i32, "payload": { - "Id": auth_request_uuid, - "UserId": user_uuid, + "Id": auth_request_id, + "UserId": user_id, } }))); } diff --git a/src/api/web.rs b/src/api/web.rs index a96d7e2a..ebb0b0e0 100644 --- a/src/api/web.rs +++ b/src/api/web.rs @@ -1,4 +1,3 @@ -use once_cell::sync::Lazy; use std::path::{Path, PathBuf}; use rocket::{ @@ -13,8 +12,9 @@ use serde_json::Value; use crate::{ api::{core::now, ApiResult, EmptyResult}, auth::decode_file_download, + db::models::{AttachmentId, CipherId}, error::Error, - util::{get_web_vault_version, Cached, SafeString}, + util::Cached, CONFIG, }; @@ -54,43 +54,7 @@ fn not_found() -> ApiResult> { #[get("/css/vaultwarden.css")] fn vaultwarden_css() -> Cached> { - // Configure the web-vault version as an integer so it can be used as a comparison smaller or greater then. - // The default is based upon the version since this feature is added. - static WEB_VAULT_VERSION: Lazy = Lazy::new(|| { - let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap(); - let vault_version = get_web_vault_version(); - - let (major, minor, patch) = match re.captures(&vault_version) { - Some(c) if c.len() == 4 => ( - c.get(1).unwrap().as_str().parse().unwrap(), - c.get(2).unwrap().as_str().parse().unwrap(), - c.get(3).unwrap().as_str().parse().unwrap(), - ), - _ => (2024, 6, 2), - }; - format!("{major}{minor:02}{patch:02}").parse::().unwrap() - }); - - // Configure the Vaultwarden version as an integer so it can be used as a comparison smaller or greater then. - // The default is based upon the version since this feature is added. 
- static VW_VERSION: Lazy = Lazy::new(|| { - let re = regex::Regex::new(r"(\d{1})\.(\d{1,2})\.(\d{1,2})").unwrap(); - let vw_version = crate::VERSION.unwrap_or("1.32.1"); - - let (major, minor, patch) = match re.captures(vw_version) { - Some(c) if c.len() == 4 => ( - c.get(1).unwrap().as_str().parse().unwrap(), - c.get(2).unwrap().as_str().parse().unwrap(), - c.get(3).unwrap().as_str().parse().unwrap(), - ), - _ => (1, 32, 1), - }; - format!("{major}{minor:02}{patch:02}").parse::().unwrap() - }); - let css_options = json!({ - "web_vault_version": *WEB_VAULT_VERSION, - "vw_version": *VW_VERSION, "signup_disabled": !CONFIG.signups_allowed() && CONFIG.signups_domains_whitelist().is_empty(), "mail_enabled": CONFIG.mail_enabled(), "yubico_enabled": CONFIG._enable_yubico() && (CONFIG.yubico_client_id().is_some() == CONFIG.yubico_secret_key().is_some()), @@ -195,16 +159,16 @@ async fn web_files(p: PathBuf) -> Cached> { Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).await.ok(), true) } -#[get("/attachments//?")] -async fn attachments(uuid: SafeString, file_id: SafeString, token: String) -> Option { +#[get("/attachments//?")] +async fn attachments(cipher_id: CipherId, file_id: AttachmentId, token: String) -> Option { let Ok(claims) = decode_file_download(&token) else { return None; }; - if claims.sub != *uuid || claims.file_id != *file_id { + if claims.sub != cipher_id || claims.file_id != file_id { return None; } - NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).await.ok() + NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(cipher_id.as_ref()).join(file_id.as_ref())).await.ok() } // We use DbConn here to let the alive healthcheck also verify the database connection. 
diff --git a/src/auth.rs b/src/auth.rs index 44b2adbe..d446109a 100644 --- a/src/auth.rs +++ b/src/auth.rs @@ -14,6 +14,10 @@ use std::{ net::IpAddr, }; +use crate::db::models::{ + AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId, + SendFileId, SendId, UserId, +}; use crate::{error::Error, CONFIG}; const JWT_ALGORITHM: Algorithm = Algorithm::RS256; @@ -155,7 +159,7 @@ pub struct LoginJwtClaims { // Issuer pub iss: String, // Subject - pub sub: String, + pub sub: UserId, pub premium: bool, pub name: String, @@ -176,7 +180,7 @@ pub struct LoginJwtClaims { // user security_stamp pub sstamp: String, // device uuid - pub device: String, + pub device: DeviceId, // [ "api", "offline_access" ] pub scope: Vec, // [ "Application" ] @@ -192,19 +196,19 @@ pub struct InviteJwtClaims { // Issuer pub iss: String, // Subject - pub sub: String, + pub sub: UserId, pub email: String, - pub org_id: Option, - pub user_org_id: Option, + pub org_id: OrganizationId, + pub member_id: MembershipId, pub invited_by_email: Option, } pub fn generate_invite_claims( - uuid: String, + user_id: UserId, email: String, - org_id: Option, - user_org_id: Option, + org_id: OrganizationId, + member_id: MembershipId, invited_by_email: Option, ) -> InviteJwtClaims { let time_now = Utc::now(); @@ -213,10 +217,10 @@ pub fn generate_invite_claims( nbf: time_now.timestamp(), exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(), iss: JWT_INVITE_ISSUER.to_string(), - sub: uuid, + sub: user_id, email, org_id, - user_org_id, + member_id, invited_by_email, } } @@ -230,18 +234,18 @@ pub struct EmergencyAccessInviteJwtClaims { // Issuer pub iss: String, // Subject - pub sub: String, + pub sub: UserId, pub email: String, - pub emer_id: String, + pub emer_id: EmergencyAccessId, pub grantor_name: String, pub grantor_email: String, } pub fn generate_emergency_access_invite_claims( - uuid: String, + user_id: UserId, email: String, - emer_id: 
String, + emer_id: EmergencyAccessId, grantor_name: String, grantor_email: String, ) -> EmergencyAccessInviteJwtClaims { @@ -251,7 +255,7 @@ pub fn generate_emergency_access_invite_claims( nbf: time_now.timestamp(), exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(), iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(), - sub: uuid, + sub: user_id, email, emer_id, grantor_name, @@ -268,21 +272,24 @@ pub struct OrgApiKeyLoginJwtClaims { // Issuer pub iss: String, // Subject - pub sub: String, + pub sub: OrgApiKeyId, pub client_id: String, - pub client_sub: String, + pub client_sub: OrganizationId, pub scope: Vec, } -pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims { +pub fn generate_organization_api_key_login_claims( + org_api_key_uuid: OrgApiKeyId, + org_id: OrganizationId, +) -> OrgApiKeyLoginJwtClaims { let time_now = Utc::now(); OrgApiKeyLoginJwtClaims { nbf: time_now.timestamp(), exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(), iss: JWT_ORG_API_KEY_ISSUER.to_string(), - sub: uuid, - client_id: format!("organization.{org_id}"), + sub: org_api_key_uuid, + client_id: format!("organization.{}", org_id), client_sub: org_id, scope: vec!["api.organization".into()], } @@ -297,18 +304,18 @@ pub struct FileDownloadClaims { // Issuer pub iss: String, // Subject - pub sub: String, + pub sub: CipherId, - pub file_id: String, + pub file_id: AttachmentId, } -pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims { +pub fn generate_file_download_claims(cipher_id: CipherId, file_id: AttachmentId) -> FileDownloadClaims { let time_now = Utc::now(); FileDownloadClaims { nbf: time_now.timestamp(), exp: (time_now + TimeDelta::try_minutes(5).unwrap()).timestamp(), iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(), - sub: uuid, + sub: cipher_id, file_id, } } @@ -363,14 +370,14 @@ pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims { } } -pub fn 
generate_verify_email_claims(uuid: String) -> BasicJwtClaims { +pub fn generate_verify_email_claims(user_id: UserId) -> BasicJwtClaims { let time_now = Utc::now(); let expire_hours = i64::from(CONFIG.invitation_expiration_hours()); BasicJwtClaims { nbf: time_now.timestamp(), exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(), iss: JWT_VERIFYEMAIL_ISSUER.to_string(), - sub: uuid, + sub: user_id.to_string(), } } @@ -384,7 +391,7 @@ pub fn generate_admin_claims() -> BasicJwtClaims { } } -pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims { +pub fn generate_send_claims(send_id: &SendId, file_id: &SendFileId) -> BasicJwtClaims { let time_now = Utc::now(); BasicJwtClaims { nbf: time_now.timestamp(), @@ -403,7 +410,7 @@ use rocket::{ }; use crate::db::{ - models::{Collection, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException}, + models::{Collection, Device, Membership, MembershipStatus, MembershipType, User, UserStampException}, DbConn, }; @@ -503,36 +510,32 @@ impl<'r> FromRequest<'r> for Headers { }; // Check JWT token is valid and get device and user from it - let claims = match decode_login(access_token) { - Ok(claims) => claims, - Err(_) => err_handler!("Invalid claim"), + let Ok(claims) = decode_login(access_token) else { + err_handler!("Invalid claim") }; - let device_uuid = claims.device; - let user_uuid = claims.sub; + let device_id = claims.device; + let user_id = claims.sub; let mut conn = match DbConn::from_request(request).await { Outcome::Success(conn) => conn, _ => err_handler!("Error getting DB"), }; - let device = match Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &mut conn).await { - Some(device) => device, - None => err_handler!("Invalid device id"), + let Some(device) = Device::find_by_uuid_and_user(&device_id, &user_id, &mut conn).await else { + err_handler!("Invalid device id") }; - let user = match User::find_by_uuid(&user_uuid, &mut conn).await { - Some(user) => 
user, - None => err_handler!("Device has no user associated"), + let Some(user) = User::find_by_uuid(&user_id, &mut conn).await else { + err_handler!("Device has no user associated") }; if user.security_stamp != claims.sstamp { if let Some(stamp_exception) = user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::(s).ok()) { - let current_route = match request.route().and_then(|r| r.name.as_deref()) { - Some(name) => name, - _ => err_handler!("Error getting current route for stamp exception"), + let Some(current_route) = request.route().and_then(|r| r.name.as_deref()) else { + err_handler!("Error getting current route for stamp exception") }; // Check if the stamp exception has expired first. @@ -570,11 +573,30 @@ pub struct OrgHeaders { pub host: String, pub device: Device, pub user: User, - pub org_user_type: UserOrgType, - pub org_user: UserOrganization, + pub membership_type: MembershipType, + pub membership_status: MembershipStatus, + pub membership: Membership, pub ip: ClientIp, } +impl OrgHeaders { + fn is_member(&self) -> bool { + // NOTE: we don't care about MembershipStatus at the moment because this is only used + // where an invited, accepted or confirmed user is expected if this ever changes or + // if from_i32 is changed to return Some(Revoked) this check needs to be changed accordingly + self.membership_type >= MembershipType::User + } + fn is_confirmed_and_admin(&self) -> bool { + self.membership_status == MembershipStatus::Confirmed && self.membership_type >= MembershipType::Admin + } + fn is_confirmed_and_manager(&self) -> bool { + self.membership_status == MembershipStatus::Confirmed && self.membership_type >= MembershipType::Manager + } + fn is_confirmed_and_owner(&self) -> bool { + self.membership_status == MembershipStatus::Confirmed && self.membership_type == MembershipType::Owner + } +} + #[rocket::async_trait] impl<'r> FromRequest<'r> for OrgHeaders { type Error = &'static str; @@ -585,55 +607,50 @@ impl<'r> FromRequest<'r> for 
OrgHeaders { // org_id is usually the second path param ("/organizations/"), // but there are cases where it is a query value. // First check the path, if this is not a valid uuid, try the query values. - let url_org_id: Option<&str> = { - let mut url_org_id = None; - if let Some(Ok(org_id)) = request.param::<&str>(1) { - if uuid::Uuid::parse_str(org_id).is_ok() { - url_org_id = Some(org_id); - } + let url_org_id: Option = { + if let Some(Ok(org_id)) = request.param::(1) { + Some(org_id.clone()) + } else if let Some(Ok(org_id)) = request.query_value::("organizationId") { + Some(org_id.clone()) + } else { + None } - - if let Some(Ok(org_id)) = request.query_value::<&str>("organizationId") { - if uuid::Uuid::parse_str(org_id).is_ok() { - url_org_id = Some(org_id); - } - } - - url_org_id }; match url_org_id { - Some(org_id) => { + Some(org_id) if uuid::Uuid::parse_str(&org_id).is_ok() => { let mut conn = match DbConn::from_request(request).await { Outcome::Success(conn) => conn, _ => err_handler!("Error getting DB"), }; let user = headers.user; - let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn).await { - Some(user) => { - if user.status == UserOrgStatus::Confirmed as i32 { - user - } else { - err_handler!("The current user isn't confirmed member of the organization") - } - } - None => err_handler!("The current user isn't member of the organization"), + let Some(membership) = Membership::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await else { + err_handler!("The current user isn't member of the organization"); }; Outcome::Success(Self { host: headers.host, device: headers.device, user, - org_user_type: { - if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) { - org_usr_type + membership_type: { + if let Some(member_type) = MembershipType::from_i32(membership.atype) { + member_type } else { // This should only happen if the DB is corrupted err_handler!("Unknown user type in the database") } }, - org_user, 
+ membership_status: { + if let Some(member_status) = MembershipStatus::from_i32(membership.status) { + // NOTE: add additional check for revoked if from_i32 is ever changed + // to return Revoked status. + member_status + } else { + err_handler!("User status is either revoked or invalid.") + } + }, + membership, ip: headers.ip, }) } @@ -646,8 +663,9 @@ pub struct AdminHeaders { pub host: String, pub device: Device, pub user: User, - pub org_user_type: UserOrgType, + pub membership_type: MembershipType, pub ip: ClientIp, + pub org_id: OrganizationId, } #[rocket::async_trait] @@ -656,13 +674,14 @@ impl<'r> FromRequest<'r> for AdminHeaders { async fn from_request(request: &'r Request<'_>) -> Outcome { let headers = try_outcome!(OrgHeaders::from_request(request).await); - if headers.org_user_type >= UserOrgType::Admin { + if headers.is_confirmed_and_admin() { Outcome::Success(Self { host: headers.host, device: headers.device, user: headers.user, - org_user_type: headers.org_user_type, + membership_type: headers.membership_type, ip: headers.ip, + org_id: headers.membership.org_uuid, }) } else { err_handler!("You need to be Admin or Owner to call this endpoint") @@ -684,16 +703,16 @@ impl From for Headers { // col_id is usually the fourth path param ("/organizations//collections/"), // but there could be cases where it is a query value. // First check the path, if this is not a valid uuid, try the query values. 
-fn get_col_id(request: &Request<'_>) -> Option { +fn get_col_id(request: &Request<'_>) -> Option { if let Some(Ok(col_id)) = request.param::(3) { if uuid::Uuid::parse_str(&col_id).is_ok() { - return Some(col_id); + return Some(col_id.into()); } } if let Some(Ok(col_id)) = request.query_value::("collectionId") { if uuid::Uuid::parse_str(&col_id).is_ok() { - return Some(col_id); + return Some(col_id.into()); } } @@ -708,6 +727,7 @@ pub struct ManagerHeaders { pub device: Device, pub user: User, pub ip: ClientIp, + pub org_id: OrganizationId, } #[rocket::async_trait] @@ -716,7 +736,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders { async fn from_request(request: &'r Request<'_>) -> Outcome { let headers = try_outcome!(OrgHeaders::from_request(request).await); - if headers.org_user_type >= UserOrgType::Manager { + if headers.is_confirmed_and_manager() { match get_col_id(request) { Some(col_id) => { let mut conn = match DbConn::from_request(request).await { @@ -724,7 +744,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders { _ => err_handler!("Error getting DB"), }; - if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await { + if !Collection::can_access_collection(&headers.membership, &col_id, &mut conn).await { err_handler!("The current user isn't a manager for this collection") } } @@ -736,6 +756,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders { device: headers.device, user: headers.user, ip: headers.ip, + org_id: headers.membership.org_uuid, }) } else { err_handler!("You need to be a Manager, Admin or Owner to call this endpoint") @@ -760,7 +781,7 @@ pub struct ManagerHeadersLoose { pub host: String, pub device: Device, pub user: User, - pub org_user: UserOrganization, + pub membership: Membership, pub ip: ClientIp, } @@ -770,12 +791,12 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose { async fn from_request(request: &'r Request<'_>) -> Outcome { let headers = try_outcome!(OrgHeaders::from_request(request).await); - if 
headers.org_user_type >= UserOrgType::Manager { + if headers.is_confirmed_and_manager() { Outcome::Success(Self { host: headers.host, device: headers.device, user: headers.user, - org_user: headers.org_user, + membership: headers.membership, ip: headers.ip, }) } else { @@ -798,14 +819,14 @@ impl From for Headers { impl ManagerHeaders { pub async fn from_loose( h: ManagerHeadersLoose, - collections: &Vec, + collections: &Vec, conn: &mut DbConn, ) -> Result { for col_id in collections { - if uuid::Uuid::parse_str(col_id).is_err() { + if uuid::Uuid::parse_str(col_id.as_ref()).is_err() { err!("Collection Id is malformed!"); } - if !Collection::can_access_collection(&h.org_user, col_id, conn).await { + if !Collection::can_access_collection(&h.membership, col_id, conn).await { err!("You don't have access to all collections!"); } } @@ -815,6 +836,7 @@ impl ManagerHeaders { device: h.device, user: h.user, ip: h.ip, + org_id: h.membership.org_uuid, }) } } @@ -823,6 +845,7 @@ pub struct OwnerHeaders { pub device: Device, pub user: User, pub ip: ClientIp, + pub org_id: OrganizationId, } #[rocket::async_trait] @@ -831,11 +854,12 @@ impl<'r> FromRequest<'r> for OwnerHeaders { async fn from_request(request: &'r Request<'_>) -> Outcome { let headers = try_outcome!(OrgHeaders::from_request(request).await); - if headers.org_user_type == UserOrgType::Owner { + if headers.is_confirmed_and_owner() { Outcome::Success(Self { device: headers.device, user: headers.user, ip: headers.ip, + org_id: headers.membership.org_uuid, }) } else { err_handler!("You need to be Owner to call this endpoint") @@ -843,6 +867,30 @@ impl<'r> FromRequest<'r> for OwnerHeaders { } } +pub struct OrgMemberHeaders { + pub host: String, + pub user: User, + pub org_id: OrganizationId, +} + +#[rocket::async_trait] +impl<'r> FromRequest<'r> for OrgMemberHeaders { + type Error = &'static str; + + async fn from_request(request: &'r Request<'_>) -> Outcome { + let headers = 
try_outcome!(OrgHeaders::from_request(request).await); + if headers.is_member() { + Outcome::Success(Self { + host: headers.host, + user: headers.user, + org_id: headers.membership.org_uuid, + }) + } else { + err_handler!("You need to be a Member of the Organization to call this endpoint") + } + } +} + // // Client IP address detection // diff --git a/src/config.rs b/src/config.rs index c4a8a005..2c0740d4 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,8 +1,10 @@ -use std::env::consts::EXE_SUFFIX; -use std::process::exit; -use std::sync::{ - atomic::{AtomicBool, Ordering}, - RwLock, +use std::{ + env::consts::EXE_SUFFIX, + process::exit, + sync::{ + atomic::{AtomicBool, Ordering}, + RwLock, + }, }; use job_scheduler_ng::Schedule; @@ -12,7 +14,7 @@ use reqwest::Url; use crate::{ db::DbConnType, error::Error, - util::{get_env, get_env_bool, parse_experimental_client_feature_flags}, + util::{get_env, get_env_bool, get_web_vault_version, is_valid_email, parse_experimental_client_feature_flags}, }; static CONFIG_FILE: Lazy = Lazy::new(|| { @@ -114,6 +116,14 @@ macro_rules! make_config { serde_json::from_str(&config_str).map_err(Into::into) } + fn clear_non_editable(&mut self) { + $($( + if !$editable { + self.$name = None; + } + )+)+ + } + /// Merges the values of both builders into a new builder. /// If both have the same element, `other` wins. fn merge(&self, other: &Self, show_overrides: bool, overrides: &mut Vec) -> Self { @@ -238,6 +248,7 @@ macro_rules! make_config { // Besides Pass, only String types will be masked via _privacy_mask. const PRIVACY_CONFIG: &[&str] = &[ "allowed_iframe_ancestors", + "allowed_connect_src", "database_url", "domain_origin", "domain_path", @@ -248,6 +259,7 @@ macro_rules! make_config { "smtp_from", "smtp_host", "smtp_username", + "_smtp_img_src", ]; let cfg = { @@ -610,6 +622,9 @@ make_config! { /// Allowed iframe ancestors (Know the risks!) 
|> Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets allowed_iframe_ancestors: String, true, def, String::new(); + /// Allowed connect-src (Know the risks!) |> Allows other domains to URLs which can be loaded using script interfaces like the Forwarded email alias feature + allowed_connect_src: String, true, def, String::new(); + /// Seconds between login requests |> Number of seconds, on average, between login and 2FA requests from the same IP address before rate limiting kicks in login_ratelimit_seconds: u64, false, def, 60; /// Max burst size for login requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `login_ratelimit_seconds`. Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2 @@ -656,9 +671,9 @@ make_config! { _enable_duo: bool, true, def, true; /// Attempt to use deprecated iframe-based Traditional Prompt (Duo WebSDK 2) duo_use_iframe: bool, false, def, false; - /// Integration Key + /// Client Id duo_ikey: String, true, option; - /// Secret Key + /// Client Secret duo_skey: Pass, true, option; /// Host duo_host: String, true, option; @@ -673,7 +688,7 @@ make_config! { /// Use Sendmail |> Whether to send mail via the `sendmail` command use_sendmail: bool, true, def, false; /// Sendmail Command |> Which sendmail command to use. The one found in the $PATH is used if not specified. - sendmail_command: String, true, option; + sendmail_command: String, false, option; /// Host smtp_host: String, true, option; /// DEPRECATED smtp_ssl |> DEPRECATED - Please use SMTP_SECURITY @@ -761,6 +776,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { ); } + let connect_src = cfg.allowed_connect_src.to_lowercase(); + for url in connect_src.split_whitespace() { + if !url.starts_with("https://") || Url::parse(url).is_err() { + err!("ALLOWED_CONNECT_SRC variable contains one or more invalid URLs. 
Only FQDN's starting with https are allowed"); + } + } + let whitelist = &cfg.signups_domains_whitelist; if !whitelist.is_empty() && whitelist.split(',').any(|d| d.trim().is_empty()) { err!("`SIGNUPS_DOMAINS_WHITELIST` contains empty tokens"); @@ -818,6 +840,7 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { "browser-fileless-import", "extension-refresh", "fido2-vault-credentials", + "inline-menu-positioning-improvements", "ssh-key-vault-item", "ssh-agent", ]; @@ -881,12 +904,12 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { let command = cfg.sendmail_command.clone().unwrap_or_else(|| format!("sendmail{EXE_SUFFIX}")); let mut path = std::path::PathBuf::from(&command); - + // Check if we can find the sendmail command to execute when no absolute path is given if !path.is_absolute() { - match which::which(&command) { - Ok(result) => path = result, - Err(_) => err!(format!("sendmail command {command:?} not found in $PATH")), - } + let Ok(which_path) = which::which(&command) else { + err!(format!("sendmail command {command} not found in $PATH")) + }; + path = which_path; } match path.metadata() { @@ -920,8 +943,8 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { } } - if (cfg.smtp_host.is_some() || cfg.use_sendmail) && !cfg.smtp_from.contains('@') { - err!("SMTP_FROM does not contain a mandatory @ sign") + if (cfg.smtp_host.is_some() || cfg.use_sendmail) && !is_valid_email(&cfg.smtp_from) { + err!(format!("SMTP_FROM '{}' is not a valid email address", cfg.smtp_from)) } if cfg._enable_email_2fa && cfg.email_token_size < 6 { @@ -1134,12 +1157,17 @@ impl Config { }) } - pub fn update_config(&self, other: ConfigBuilder) -> Result<(), Error> { + pub fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> { // Remove default values //let builder = other.remove(&self.inner.read().unwrap()._env); // TODO: Remove values that are defaults, above only checks those set by env and not the defaults - 
let builder = other; + let mut builder = other; + + // Remove values that are not editable + if ignore_non_editable { + builder.clear_non_editable(); + } // Serialize now before we consume the builder let config_str = serde_json::to_string_pretty(&builder)?; @@ -1174,7 +1202,7 @@ impl Config { let mut _overrides = Vec::new(); usr.merge(&other, false, &mut _overrides) }; - self.update_config(builder) + self.update_config(builder, false) } /// Tests whether an email's domain is allowed. A domain is allowed if it @@ -1315,6 +1343,8 @@ where // Register helpers hb.register_helper("case", Box::new(case_helper)); hb.register_helper("to_json", Box::new(to_json)); + hb.register_helper("webver", Box::new(webver)); + hb.register_helper("vwver", Box::new(vwver)); macro_rules! reg { ($name:expr) => {{ @@ -1419,3 +1449,42 @@ fn to_json<'reg, 'rc>( out.write(&json)?; Ok(()) } + +// Configure the web-vault version as an integer so it can be used as a comparison smaller or greater then. +// The default is based upon the version since this feature is added. +static WEB_VAULT_VERSION: Lazy = Lazy::new(|| { + let vault_version = get_web_vault_version(); + // Use a single regex capture to extract version components + let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap(); + re.captures(&vault_version) + .and_then(|c| { + (c.len() == 4).then(|| { + format!("{}.{}.{}", c.get(1).unwrap().as_str(), c.get(2).unwrap().as_str(), c.get(3).unwrap().as_str()) + }) + }) + .and_then(|v| semver::Version::parse(&v).ok()) + .unwrap_or_else(|| semver::Version::parse("2024.6.2").unwrap()) +}); + +// Configure the Vaultwarden version as an integer so it can be used as a comparison smaller or greater then. +// The default is based upon the version since this feature is added. 
+static VW_VERSION: Lazy = Lazy::new(|| { + let vw_version = crate::VERSION.unwrap_or("1.32.5"); + // Use a single regex capture to extract version components + let re = regex::Regex::new(r"(\d{1})\.(\d{1,2})\.(\d{1,2})").unwrap(); + re.captures(vw_version) + .and_then(|c| { + (c.len() == 4).then(|| { + format!("{}.{}.{}", c.get(1).unwrap().as_str(), c.get(2).unwrap().as_str(), c.get(3).unwrap().as_str()) + }) + }) + .and_then(|v| semver::Version::parse(&v).ok()) + .unwrap_or_else(|| semver::Version::parse("1.32.5").unwrap()) +}); + +handlebars::handlebars_helper!(webver: | web_vault_version: String | + semver::VersionReq::parse(&web_vault_version).expect("Invalid web-vault version compare string").matches(&WEB_VAULT_VERSION) +); +handlebars::handlebars_helper!(vwver: | vw_version: String | + semver::VersionReq::parse(&vw_version).expect("Invalid Vaultwarden version compare string").matches(&VW_VERSION) +); diff --git a/src/crypto.rs b/src/crypto.rs index 99f0fb91..5ab8f1fb 100644 --- a/src/crypto.rs +++ b/src/crypto.rs @@ -6,7 +6,7 @@ use std::num::NonZeroU32; use data_encoding::{Encoding, HEXLOWER}; use ring::{digest, hmac, pbkdf2}; -static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256; +const DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256; const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN; pub fn hash_password(secret: &[u8], salt: &[u8], iterations: u32) -> Vec { @@ -56,11 +56,11 @@ pub fn encode_random_bytes(e: Encoding) -> String { pub fn get_random_string(alphabet: &[u8], num_chars: usize) -> String { // Ref: https://rust-lang-nursery.github.io/rust-cookbook/algorithms/randomness.html use rand::Rng; - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); (0..num_chars) .map(|_| { - let i = rng.gen_range(0..alphabet.len()); + let i = rng.random_range(0..alphabet.len()); alphabet[i] as char }) .collect() @@ -84,14 +84,15 @@ pub fn generate_id() -> String { encode_random_bytes::(HEXLOWER) } -pub fn generate_send_id() -> String 
{ - // Send IDs are globally scoped, so make them longer to avoid collisions. +pub fn generate_send_file_id() -> String { + // Send File IDs are globally scoped, so make them longer to avoid collisions. generate_id::<32>() // 256 bits } -pub fn generate_attachment_id() -> String { +use crate::db::models::AttachmentId; +pub fn generate_attachment_id() -> AttachmentId { // Attachment IDs are scoped to a cipher, so they can be smaller. - generate_id::<10>() // 80 bits + AttachmentId(generate_id::<10>()) // 80 bits } /// Generates a numeric token for email-based verifications. diff --git a/src/db/mod.rs b/src/db/mod.rs index fe1ab79b..464be561 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -373,24 +373,18 @@ pub async fn backup_database(conn: &mut DbConn) -> Result { err!("PostgreSQL and MySQL/MariaDB do not support this backup feature"); } sqlite { - backup_sqlite_database(conn) + let db_url = CONFIG.database_url(); + let db_path = std::path::Path::new(&db_url).parent().unwrap(); + let backup_file = db_path + .join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S"))) + .to_string_lossy() + .into_owned(); + diesel::sql_query(format!("VACUUM INTO '{backup_file}'")).execute(conn)?; + Ok(backup_file) } } } -#[cfg(sqlite)] -pub fn backup_sqlite_database(conn: &mut diesel::sqlite::SqliteConnection) -> Result { - use diesel::RunQueryDsl; - let db_url = CONFIG.database_url(); - let db_path = std::path::Path::new(&db_url).parent().unwrap(); - let backup_file = db_path - .join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S"))) - .to_string_lossy() - .into_owned(); - diesel::sql_query(format!("VACUUM INTO '{backup_file}'")).execute(conn)?; - Ok(backup_file) -} - /// Get the SQL Server version pub async fn get_sql_server_version(conn: &mut DbConn) -> String { db_run! 
{@raw conn: diff --git a/src/db/models/attachment.rs b/src/db/models/attachment.rs index 65855cc0..09348f78 100644 --- a/src/db/models/attachment.rs +++ b/src/db/models/attachment.rs @@ -1,9 +1,12 @@ use std::io::ErrorKind; use bigdecimal::{BigDecimal, ToPrimitive}; +use derive_more::{AsRef, Deref, Display}; use serde_json::Value; +use super::{CipherId, OrganizationId, UserId}; use crate::CONFIG; +use macros::IdFromParam; db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] @@ -11,8 +14,8 @@ db_object! { #[diesel(treat_none_as_null = true)] #[diesel(primary_key(id))] pub struct Attachment { - pub id: String, - pub cipher_uuid: String, + pub id: AttachmentId, + pub cipher_uuid: CipherId, pub file_name: String, // encrypted pub file_size: i64, pub akey: Option, @@ -21,7 +24,13 @@ db_object! { /// Local methods impl Attachment { - pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i64, akey: Option) -> Self { + pub const fn new( + id: AttachmentId, + cipher_uuid: CipherId, + file_name: String, + file_size: i64, + akey: Option, + ) -> Self { Self { id, cipher_uuid, @@ -117,14 +126,14 @@ impl Attachment { }} } - pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult { for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await { attachment.delete(conn).await?; } Ok(()) } - pub async fn find_by_id(id: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_id(id: &AttachmentId, conn: &mut DbConn) -> Option { db_run! { conn: { attachments::table .filter(attachments::id.eq(id.to_lowercase())) @@ -134,7 +143,7 @@ impl Attachment { }} } - pub async fn find_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> Vec { db_run! 
{ conn: { attachments::table .filter(attachments::cipher_uuid.eq(cipher_uuid)) @@ -144,7 +153,7 @@ impl Attachment { }} } - pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn size_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 { db_run! { conn: { let result: Option = attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -161,7 +170,7 @@ impl Attachment { }} } - pub async fn count_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn count_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 { db_run! { conn: { attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -172,7 +181,7 @@ impl Attachment { }} } - pub async fn size_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn size_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { db_run! { conn: { let result: Option = attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -189,7 +198,7 @@ impl Attachment { }} } - pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { db_run! { conn: { attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -203,7 +212,11 @@ impl Attachment { // This will return all attachments linked to the user or org // There is no filtering done here if the user actually has access! // It is used to speed up the sync process, and the matching is done in a different part. - pub async fn find_all_by_user_and_orgs(user_uuid: &str, org_uuids: &Vec, conn: &mut DbConn) -> Vec { + pub async fn find_all_by_user_and_orgs( + user_uuid: &UserId, + org_uuids: &Vec, + conn: &mut DbConn, + ) -> Vec { db_run! 
{ conn: { attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -216,3 +229,20 @@ impl Attachment { }} } } + +#[derive( + Clone, + Debug, + AsRef, + Deref, + DieselNewType, + Display, + FromForm, + Hash, + PartialEq, + Eq, + Serialize, + Deserialize, + IdFromParam, +)] +pub struct AttachmentId(pub String); diff --git a/src/db/models/auth_request.rs b/src/db/models/auth_request.rs index 9388c71a..7f406581 100644 --- a/src/db/models/auth_request.rs +++ b/src/db/models/auth_request.rs @@ -1,5 +1,9 @@ -use crate::crypto::ct_eq; +use super::{DeviceId, OrganizationId, UserId}; +use crate::{crypto::ct_eq, util::format_date}; use chrono::{NaiveDateTime, Utc}; +use derive_more::{AsRef, Deref, Display, From}; +use macros::UuidFromParam; +use serde_json::Value; db_object! { #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)] @@ -7,15 +11,15 @@ db_object! { #[diesel(treat_none_as_null = true)] #[diesel(primary_key(uuid))] pub struct AuthRequest { - pub uuid: String, - pub user_uuid: String, - pub organization_uuid: Option, + pub uuid: AuthRequestId, + pub user_uuid: UserId, + pub organization_uuid: Option, - pub request_device_identifier: String, + pub request_device_identifier: DeviceId, pub device_type: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs pub request_ip: String, - pub response_device_id: Option, + pub response_device_id: Option, pub access_code: String, pub public_key: String, @@ -33,8 +37,8 @@ db_object! 
{ impl AuthRequest { pub fn new( - user_uuid: String, - request_device_identifier: String, + user_uuid: UserId, + request_device_identifier: DeviceId, device_type: i32, request_ip: String, access_code: String, @@ -43,7 +47,7 @@ impl AuthRequest { let now = Utc::now().naive_utc(); Self { - uuid: crate::util::get_uuid(), + uuid: AuthRequestId(crate::util::get_uuid()), user_uuid, organization_uuid: None, @@ -61,6 +65,13 @@ impl AuthRequest { authentication_date: None, } } + + pub fn to_json_for_pending_device(&self) -> Value { + json!({ + "id": self.uuid, + "creationDate": format_date(&self.creation_date), + }) + } } use crate::db::DbConn; @@ -101,7 +112,7 @@ impl AuthRequest { } } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &AuthRequestId, conn: &mut DbConn) -> Option { db_run! {conn: { auth_requests::table .filter(auth_requests::uuid.eq(uuid)) @@ -111,7 +122,18 @@ impl AuthRequest { }} } - pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_uuid_and_user(uuid: &AuthRequestId, user_uuid: &UserId, conn: &mut DbConn) -> Option { + db_run! {conn: { + auth_requests::table + .filter(auth_requests::uuid.eq(uuid)) + .filter(auth_requests::user_uuid.eq(user_uuid)) + .first::(conn) + .ok() + .from_db() + }} + } + + pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! {conn: { auth_requests::table .filter(auth_requests::user_uuid.eq(user_uuid)) @@ -119,6 +141,21 @@ impl AuthRequest { }} } + pub async fn find_by_user_and_requested_device( + user_uuid: &UserId, + device_uuid: &DeviceId, + conn: &mut DbConn, + ) -> Option { + db_run! 
{conn: { + auth_requests::table + .filter(auth_requests::user_uuid.eq(user_uuid)) + .filter(auth_requests::request_device_identifier.eq(device_uuid)) + .filter(auth_requests::approved.is_null()) + .order_by(auth_requests::creation_date.desc()) + .first::(conn).ok().from_db() + }} + } + pub async fn find_created_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec { db_run! {conn: { auth_requests::table @@ -146,3 +183,21 @@ impl AuthRequest { } } } + +#[derive( + Clone, + Debug, + AsRef, + Deref, + DieselNewType, + Display, + From, + FromForm, + Hash, + PartialEq, + Eq, + Serialize, + Deserialize, + UuidFromParam, +)] +pub struct AuthRequestId(String); diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs index 75d54df2..d9dbd28d 100644 --- a/src/db/models/cipher.rs +++ b/src/db/models/cipher.rs @@ -1,13 +1,15 @@ use crate::util::LowerCase; use crate::CONFIG; use chrono::{NaiveDateTime, TimeDelta, Utc}; +use derive_more::{AsRef, Deref, Display, From}; use serde_json::Value; use super::{ - Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, UserOrganization, + Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, MembershipStatus, + MembershipType, OrganizationId, User, UserId, }; - use crate::api::core::{CipherData, CipherSyncData, CipherSyncType}; +use macros::UuidFromParam; use std::borrow::Cow; @@ -17,12 +19,12 @@ db_object! { #[diesel(treat_none_as_null = true)] #[diesel(primary_key(uuid))] pub struct Cipher { - pub uuid: String, + pub uuid: CipherId, pub created_at: NaiveDateTime, pub updated_at: NaiveDateTime, - pub user_uuid: Option, - pub organization_uuid: Option, + pub user_uuid: Option, + pub organization_uuid: Option, pub key: Option, @@ -46,10 +48,9 @@ db_object! 
{ } } -#[allow(dead_code)] pub enum RepromptType { None = 0, - Password = 1, // not currently used in server + Password = 1, } /// Local methods @@ -58,7 +59,7 @@ impl Cipher { let now = Utc::now().naive_utc(); Self { - uuid: crate::util::get_uuid(), + uuid: CipherId(crate::util::get_uuid()), created_at: now, updated_at: now, @@ -136,12 +137,12 @@ impl Cipher { pub async fn to_json( &self, host: &str, - user_uuid: &str, + user_uuid: &UserId, cipher_sync_data: Option<&CipherSyncData>, sync_type: CipherSyncType, conn: &mut DbConn, ) -> Value { - use crate::util::format_date; + use crate::util::{format_date, validate_and_format_date}; let mut attachments_json: Value = Value::Null; if let Some(cipher_sync_data) = cipher_sync_data { @@ -157,16 +158,16 @@ impl Cipher { // We don't need these values at all for Organizational syncs // Skip any other database calls if this is the case and just return false. - let (read_only, hide_passwords) = if sync_type == CipherSyncType::User { + let (read_only, hide_passwords, _) = if sync_type == CipherSyncType::User { match self.get_access_restrictions(user_uuid, cipher_sync_data, conn).await { - Some((ro, hp)) => (ro, hp), + Some((ro, hp, mn)) => (ro, hp, mn), None => { error!("Cipher ownership assertion failure"); - (true, true) + (true, true, false) } } } else { - (false, false) + (false, false, false) }; let fields_json: Vec<_> = self @@ -219,7 +220,7 @@ impl Cipher { }) .map(|mut d| match d.get("lastUsedDate").and_then(|l| l.as_str()) { Some(l) => { - d["lastUsedDate"] = json!(crate::util::validate_and_format_date(l)); + d["lastUsedDate"] = json!(validate_and_format_date(l)); d } _ => { @@ -242,12 +243,28 @@ impl Cipher { // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream // Set the first element of the Uris array as Uri, this is needed several (mobile) clients. 
if self.atype == 1 { - if type_data_json["uris"].is_array() { - let uri = type_data_json["uris"][0]["uri"].clone(); - type_data_json["uri"] = uri; - } else { - // Upstream always has an Uri key/value - type_data_json["uri"] = Value::Null; + // Upstream always has an `uri` key/value + type_data_json["uri"] = Value::Null; + if let Some(uris) = type_data_json["uris"].as_array_mut() { + if !uris.is_empty() { + // Fix uri match values first, they are only allowed to be a number or null + // If it is a string, convert it to an int or null if that fails + for uri in &mut *uris { + if uri["match"].is_string() { + let match_value = match uri["match"].as_str().unwrap_or_default().parse::() { + Ok(n) => json!(n), + _ => Value::Null, + }; + uri["match"] = match_value; + } + } + type_data_json["uri"] = uris[0]["uri"].clone(); + } + } + + // Check if `passwordRevisionDate` is a valid date, else convert it + if let Some(pw_revision) = type_data_json["passwordRevisionDate"].as_str() { + type_data_json["passwordRevisionDate"] = json!(validate_and_format_date(pw_revision)); } } @@ -262,6 +279,19 @@ impl Cipher { } } + // Fix invalid SSH Entries + // This breaks at least the native mobile client if invalid + // The only way to fix this is by setting type_data_json to `null` + // Opening this ssh-key in the mobile client will probably crash the client, but you can edit, save and afterwards delete it + if self.atype == 5 + && (type_data_json["keyFingerprint"].as_str().is_none_or(|v| v.is_empty()) + || type_data_json["privateKey"].as_str().is_none_or(|v| v.is_empty()) + || type_data_json["publicKey"].as_str().is_none_or(|v| v.is_empty())) + { + warn!("Error parsing ssh-key, mandatory fields are invalid for {}", self.uuid); + type_data_json = Value::Null; + } + // Clone the type_data and add some default value. 
let mut data_json = type_data_json.clone(); @@ -279,7 +309,7 @@ impl Cipher { Cow::from(Vec::with_capacity(0)) } } else { - Cow::from(self.get_admin_collections(user_uuid.to_string(), conn).await) + Cow::from(self.get_admin_collections(user_uuid.clone(), conn).await) }; // There are three types of cipher response models in upstream @@ -296,7 +326,7 @@ impl Cipher { "creationDate": format_date(&self.created_at), "revisionDate": format_date(&self.updated_at), "deletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))), - "reprompt": self.reprompt.unwrap_or(RepromptType::None as i32), + "reprompt": self.reprompt.filter(|r| *r == RepromptType::None as i32 || *r == RepromptType::Password as i32).unwrap_or(RepromptType::None as i32), "organizationId": self.organization_uuid, "key": self.key, "attachments": attachments_json, @@ -328,7 +358,7 @@ impl Cipher { // Skip adding these fields in that case if sync_type == CipherSyncType::User { json_object["folderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data { - cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string()) + cipher_sync_data.cipher_folders.get(&self.uuid).cloned() } else { self.get_folder_uuid(user_uuid, conn).await }); @@ -357,7 +387,7 @@ impl Cipher { json_object } - pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec { + pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec { let mut user_uuids = Vec::new(); match self.user_uuid { Some(ref user_uuid) => { @@ -368,17 +398,16 @@ impl Cipher { // Belongs to Organization, need to update affected users if let Some(ref org_uuid) = self.organization_uuid { // users having access to the collection - let mut collection_users = - UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await; + let mut collection_users = Membership::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await; if CONFIG.org_groups_enabled() { // members of a group having access to the collection 
let group_users = - UserOrganization::find_by_cipher_and_org_with_group(&self.uuid, org_uuid, conn).await; + Membership::find_by_cipher_and_org_with_group(&self.uuid, org_uuid, conn).await; collection_users.extend(group_users); } - for user_org in collection_users { - User::update_uuid_revision(&user_org.user_uuid, conn).await; - user_uuids.push(user_org.user_uuid.clone()) + for member in collection_users { + User::update_uuid_revision(&member.user_uuid, conn).await; + user_uuids.push(member.user_uuid.clone()) } } } @@ -436,7 +465,7 @@ impl Cipher { }} } - pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { // TODO: Optimize this by executing a DELETE directly on the database, instead of first fetching. for cipher in Self::find_by_org(org_uuid, conn).await { cipher.delete(conn).await?; @@ -444,7 +473,7 @@ impl Cipher { Ok(()) } - pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { for cipher in Self::find_owned_by_user(user_uuid, conn).await { cipher.delete(conn).await?; } @@ -462,52 +491,59 @@ impl Cipher { } } - pub async fn move_to_folder(&self, folder_uuid: Option, user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn move_to_folder( + &self, + folder_uuid: Option, + user_uuid: &UserId, + conn: &mut DbConn, + ) -> EmptyResult { User::update_uuid_revision(user_uuid, conn).await; match (self.get_folder_uuid(user_uuid, conn).await, folder_uuid) { // No changes (None, None) => Ok(()), - (Some(ref old), Some(ref new)) if old == new => Ok(()), + (Some(ref old_folder), Some(ref new_folder)) if old_folder == new_folder => Ok(()), // Add to folder - (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn).await, + (None, Some(new_folder)) => FolderCipher::new(new_folder, 
self.uuid.clone()).save(conn).await, // Remove from folder - (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await { - Some(old) => old.delete(conn).await, - None => err!("Couldn't move from previous folder"), - }, + (Some(old_folder), None) => { + match FolderCipher::find_by_folder_and_cipher(&old_folder, &self.uuid, conn).await { + Some(old_folder) => old_folder.delete(conn).await, + None => err!("Couldn't move from previous folder"), + } + } // Move to another folder - (Some(old), Some(new)) => { - if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await { - old.delete(conn).await?; + (Some(old_folder), Some(new_folder)) => { + if let Some(old_folder) = FolderCipher::find_by_folder_and_cipher(&old_folder, &self.uuid, conn).await { + old_folder.delete(conn).await?; } - FolderCipher::new(&new, &self.uuid).save(conn).await + FolderCipher::new(new_folder, self.uuid.clone()).save(conn).await } } } /// Returns whether this cipher is directly owned by the user. - pub fn is_owned_by_user(&self, user_uuid: &str) -> bool { + pub fn is_owned_by_user(&self, user_uuid: &UserId) -> bool { self.user_uuid.is_some() && self.user_uuid.as_ref().unwrap() == user_uuid } /// Returns whether this cipher is owned by an org in which the user has full access. 
async fn is_in_full_access_org( &self, - user_uuid: &str, + user_uuid: &UserId, cipher_sync_data: Option<&CipherSyncData>, conn: &mut DbConn, ) -> bool { if let Some(ref org_uuid) = self.organization_uuid { if let Some(cipher_sync_data) = cipher_sync_data { - if let Some(cached_user_org) = cipher_sync_data.user_organizations.get(org_uuid) { - return cached_user_org.has_full_access(); + if let Some(cached_member) = cipher_sync_data.members.get(org_uuid) { + return cached_member.has_full_access(); } - } else if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await { - return user_org.has_full_access(); + } else if let Some(member) = Membership::find_by_user_and_org(user_uuid, org_uuid, conn).await { + return member.has_full_access(); } } false @@ -516,7 +552,7 @@ impl Cipher { /// Returns whether this cipher is owned by an group in which the user has full access. async fn is_in_full_access_group( &self, - user_uuid: &str, + user_uuid: &UserId, cipher_sync_data: Option<&CipherSyncData>, conn: &mut DbConn, ) -> bool { @@ -536,14 +572,14 @@ impl Cipher { /// Returns the user's access restrictions to this cipher. A return value /// of None means that this cipher does not belong to the user, and is /// not in any collection the user has access to. Otherwise, the user has - /// access to this cipher, and Some(read_only, hide_passwords) represents + /// access to this cipher, and Some(read_only, hide_passwords, manage) represents /// the access restrictions. pub async fn get_access_restrictions( &self, - user_uuid: &str, + user_uuid: &UserId, cipher_sync_data: Option<&CipherSyncData>, conn: &mut DbConn, - ) -> Option<(bool, bool)> { + ) -> Option<(bool, bool, bool)> { // Check whether this cipher is directly owned by the user, or is in // a collection that the user has full access to. If so, there are no // access restrictions. 
@@ -551,21 +587,21 @@ impl Cipher { || self.is_in_full_access_org(user_uuid, cipher_sync_data, conn).await || self.is_in_full_access_group(user_uuid, cipher_sync_data, conn).await { - return Some((false, false)); + return Some((false, false, true)); } let rows = if let Some(cipher_sync_data) = cipher_sync_data { - let mut rows: Vec<(bool, bool)> = Vec::new(); + let mut rows: Vec<(bool, bool, bool)> = Vec::new(); if let Some(collections) = cipher_sync_data.cipher_collections.get(&self.uuid) { for collection in collections { //User permissions - if let Some(uc) = cipher_sync_data.user_collections.get(collection) { - rows.push((uc.read_only, uc.hide_passwords)); + if let Some(cu) = cipher_sync_data.user_collections.get(collection) { + rows.push((cu.read_only, cu.hide_passwords, cu.manage)); } //Group permissions if let Some(cg) = cipher_sync_data.user_collections_groups.get(collection) { - rows.push((cg.read_only, cg.hide_passwords)); + rows.push((cg.read_only, cg.hide_passwords, cg.manage)); } } } @@ -592,15 +628,21 @@ impl Cipher { // booleans and this behavior isn't portable anyway. let mut read_only = true; let mut hide_passwords = true; - for (ro, hp) in rows.iter() { + let mut manage = false; + for (ro, hp, mn) in rows.iter() { read_only &= ro; hide_passwords &= hp; + manage &= mn; } - Some((read_only, hide_passwords)) + Some((read_only, hide_passwords, manage)) } - async fn get_user_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> { + async fn get_user_collections_access_flags( + &self, + user_uuid: &UserId, + conn: &mut DbConn, + ) -> Vec<(bool, bool, bool)> { db_run! {conn: { // Check whether this cipher is in any collections accessible to the // user. If so, retrieve the access flags for each collection. 
@@ -611,13 +653,17 @@ impl Cipher { .inner_join(users_collections::table.on( ciphers_collections::collection_uuid.eq(users_collections::collection_uuid) .and(users_collections::user_uuid.eq(user_uuid)))) - .select((users_collections::read_only, users_collections::hide_passwords)) - .load::<(bool, bool)>(conn) + .select((users_collections::read_only, users_collections::hide_passwords, users_collections::manage)) + .load::<(bool, bool, bool)>(conn) .expect("Error getting user access restrictions") }} } - async fn get_group_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> { + async fn get_group_collections_access_flags( + &self, + user_uuid: &UserId, + conn: &mut DbConn, + ) -> Vec<(bool, bool, bool)> { if !CONFIG.org_groups_enabled() { return Vec::new(); } @@ -637,49 +683,49 @@ impl Cipher { users_organizations::uuid.eq(groups_users::users_organizations_uuid) )) .filter(users_organizations::user_uuid.eq(user_uuid)) - .select((collections_groups::read_only, collections_groups::hide_passwords)) - .load::<(bool, bool)>(conn) + .select((collections_groups::read_only, collections_groups::hide_passwords, collections_groups::manage)) + .load::<(bool, bool, bool)>(conn) .expect("Error getting group access restrictions") }} } - pub async fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn is_write_accessible_to_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { match self.get_access_restrictions(user_uuid, None, conn).await { - Some((read_only, _hide_passwords)) => !read_only, + Some((read_only, _hide_passwords, manage)) => !read_only || manage, None => false, } } - pub async fn is_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn is_accessible_to_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { self.get_access_restrictions(user_uuid, None, conn).await.is_some() } // Returns whether this cipher is a favorite of the specified user. 
- pub async fn is_favorite(&self, user_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn is_favorite(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { Favorite::is_favorite(&self.uuid, user_uuid, conn).await } // Sets whether this cipher is a favorite of the specified user. - pub async fn set_favorite(&self, favorite: Option, user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn set_favorite(&self, favorite: Option, user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { match favorite { None => Ok(()), // No change requested. Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn).await, } } - pub async fn get_folder_uuid(&self, user_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &mut DbConn) -> Option { db_run! {conn: { folders_ciphers::table .inner_join(folders::table) .filter(folders::user_uuid.eq(&user_uuid)) .filter(folders_ciphers::cipher_uuid.eq(&self.uuid)) .select(folders_ciphers::folder_uuid) - .first::(conn) + .first::(conn) .ok() }} } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &CipherId, conn: &mut DbConn) -> Option { db_run! {conn: { ciphers::table .filter(ciphers::uuid.eq(uuid)) @@ -689,7 +735,11 @@ impl Cipher { }} } - pub async fn find_by_uuid_and_org(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_org( + cipher_uuid: &CipherId, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Option { db_run! {conn: { ciphers::table .filter(ciphers::uuid.eq(cipher_uuid)) @@ -712,7 +762,7 @@ impl Cipher { // true, then the non-interesting ciphers will not be returned. As a // result, those ciphers will not appear in "My Vault" for the org // owner/admin, but they can still be accessed via the org vault view. 
- pub async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, visible_only: bool, conn: &mut DbConn) -> Vec { if CONFIG.org_groups_enabled() { db_run! {conn: { let mut query = ciphers::table @@ -722,7 +772,7 @@ impl Cipher { .left_join(users_organizations::table.on( ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable()) .and(users_organizations::user_uuid.eq(user_uuid)) - .and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32)) + .and(users_organizations::status.eq(MembershipStatus::Confirmed as i32)) )) .left_join(users_collections::table.on( ciphers_collections::collection_uuid.eq(users_collections::collection_uuid) @@ -749,7 +799,7 @@ impl Cipher { if !visible_only { query = query.or_filter( - users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner + users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner ); } @@ -767,7 +817,7 @@ impl Cipher { .left_join(users_organizations::table.on( ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable()) .and(users_organizations::user_uuid.eq(user_uuid)) - .and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32)) + .and(users_organizations::status.eq(MembershipStatus::Confirmed as i32)) )) .left_join(users_collections::table.on( ciphers_collections::collection_uuid.eq(users_collections::collection_uuid) @@ -781,7 +831,7 @@ impl Cipher { if !visible_only { query = query.or_filter( - users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner + users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner ); } @@ -794,12 +844,12 @@ impl Cipher { } // Find all ciphers visible to the specified user. 
- pub async fn find_by_user_visible(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_user_visible(user_uuid: &UserId, conn: &mut DbConn) -> Vec { Self::find_by_user(user_uuid, true, conn).await } // Find all ciphers directly owned by the specified user. - pub async fn find_owned_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_owned_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! {conn: { ciphers::table .filter( @@ -810,7 +860,7 @@ impl Cipher { }} } - pub async fn count_owned_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn count_owned_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 { db_run! {conn: { ciphers::table .filter(ciphers::user_uuid.eq(user_uuid)) @@ -821,7 +871,7 @@ impl Cipher { }} } - pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { db_run! {conn: { ciphers::table .filter(ciphers::organization_uuid.eq(org_uuid)) @@ -829,7 +879,7 @@ impl Cipher { }} } - pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { db_run! {conn: { ciphers::table .filter(ciphers::organization_uuid.eq(org_uuid)) @@ -840,7 +890,7 @@ impl Cipher { }} } - pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec { db_run! {conn: { folders_ciphers::table.inner_join(ciphers::table) .filter(folders_ciphers::folder_uuid.eq(folder_uuid)) @@ -858,7 +908,7 @@ impl Cipher { }} } - pub async fn get_collections(&self, user_id: String, conn: &mut DbConn) -> Vec { + pub async fn get_collections(&self, user_uuid: UserId, conn: &mut DbConn) -> Vec { if CONFIG.org_groups_enabled() { db_run! 
{conn: { ciphers_collections::table @@ -868,11 +918,11 @@ impl Cipher { )) .left_join(users_organizations::table.on( users_organizations::org_uuid.eq(collections::org_uuid) - .and(users_organizations::user_uuid.eq(user_id.clone())) + .and(users_organizations::user_uuid.eq(user_uuid.clone())) )) .left_join(users_collections::table.on( users_collections::collection_uuid.eq(ciphers_collections::collection_uuid) - .and(users_collections::user_uuid.eq(user_id.clone())) + .and(users_collections::user_uuid.eq(user_uuid.clone())) )) .left_join(groups_users::table.on( groups_users::users_organizations_uuid.eq(users_organizations::uuid) @@ -883,14 +933,14 @@ impl Cipher { .and(collections_groups::groups_uuid.eq(groups::uuid)) )) .filter(users_organizations::access_all.eq(true) // User has access all - .or(users_collections::user_uuid.eq(user_id) // User has access to collection + .or(users_collections::user_uuid.eq(user_uuid) // User has access to collection .and(users_collections::read_only.eq(false))) .or(groups::access_all.eq(true)) // Access via groups .or(collections_groups::collections_uuid.is_not_null() // Access via groups .and(collections_groups::read_only.eq(false))) ) .select(ciphers_collections::collection_uuid) - .load::(conn).unwrap_or_default() + .load::(conn).unwrap_or_default() }} } else { db_run! 
{conn: { @@ -901,23 +951,23 @@ impl Cipher { )) .inner_join(users_organizations::table.on( users_organizations::org_uuid.eq(collections::org_uuid) - .and(users_organizations::user_uuid.eq(user_id.clone())) + .and(users_organizations::user_uuid.eq(user_uuid.clone())) )) .left_join(users_collections::table.on( users_collections::collection_uuid.eq(ciphers_collections::collection_uuid) - .and(users_collections::user_uuid.eq(user_id.clone())) + .and(users_collections::user_uuid.eq(user_uuid.clone())) )) .filter(users_organizations::access_all.eq(true) // User has access all - .or(users_collections::user_uuid.eq(user_id) // User has access to collection + .or(users_collections::user_uuid.eq(user_uuid) // User has access to collection .and(users_collections::read_only.eq(false))) ) .select(ciphers_collections::collection_uuid) - .load::(conn).unwrap_or_default() + .load::(conn).unwrap_or_default() }} } } - pub async fn get_admin_collections(&self, user_id: String, conn: &mut DbConn) -> Vec { + pub async fn get_admin_collections(&self, user_uuid: UserId, conn: &mut DbConn) -> Vec { if CONFIG.org_groups_enabled() { db_run! 
{conn: { ciphers_collections::table @@ -927,11 +977,11 @@ impl Cipher { )) .left_join(users_organizations::table.on( users_organizations::org_uuid.eq(collections::org_uuid) - .and(users_organizations::user_uuid.eq(user_id.clone())) + .and(users_organizations::user_uuid.eq(user_uuid.clone())) )) .left_join(users_collections::table.on( users_collections::collection_uuid.eq(ciphers_collections::collection_uuid) - .and(users_collections::user_uuid.eq(user_id.clone())) + .and(users_collections::user_uuid.eq(user_uuid.clone())) )) .left_join(groups_users::table.on( groups_users::users_organizations_uuid.eq(users_organizations::uuid) @@ -942,15 +992,15 @@ impl Cipher { .and(collections_groups::groups_uuid.eq(groups::uuid)) )) .filter(users_organizations::access_all.eq(true) // User has access all - .or(users_collections::user_uuid.eq(user_id) // User has access to collection + .or(users_collections::user_uuid.eq(user_uuid) // User has access to collection .and(users_collections::read_only.eq(false))) .or(groups::access_all.eq(true)) // Access via groups .or(collections_groups::collections_uuid.is_not_null() // Access via groups .and(collections_groups::read_only.eq(false))) - .or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner + .or(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner ) .select(ciphers_collections::collection_uuid) - .load::(conn).unwrap_or_default() + .load::(conn).unwrap_or_default() }} } else { db_run! 
{conn: { @@ -961,26 +1011,29 @@ impl Cipher { )) .inner_join(users_organizations::table.on( users_organizations::org_uuid.eq(collections::org_uuid) - .and(users_organizations::user_uuid.eq(user_id.clone())) + .and(users_organizations::user_uuid.eq(user_uuid.clone())) )) .left_join(users_collections::table.on( users_collections::collection_uuid.eq(ciphers_collections::collection_uuid) - .and(users_collections::user_uuid.eq(user_id.clone())) + .and(users_collections::user_uuid.eq(user_uuid.clone())) )) .filter(users_organizations::access_all.eq(true) // User has access all - .or(users_collections::user_uuid.eq(user_id) // User has access to collection + .or(users_collections::user_uuid.eq(user_uuid) // User has access to collection .and(users_collections::read_only.eq(false))) - .or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner + .or(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner ) .select(ciphers_collections::collection_uuid) - .load::(conn).unwrap_or_default() + .load::(conn).unwrap_or_default() }} } } /// Return a Vec with (cipher_uuid, collection_uuid) /// This is used during a full sync so we only need one query for all collections accessible. - pub async fn get_collections_with_cipher_by_user(user_id: String, conn: &mut DbConn) -> Vec<(String, String)> { + pub async fn get_collections_with_cipher_by_user( + user_uuid: UserId, + conn: &mut DbConn, + ) -> Vec<(CipherId, CollectionId)> { db_run! 
{conn: { ciphers_collections::table .inner_join(collections::table.on( @@ -988,12 +1041,12 @@ impl Cipher { )) .inner_join(users_organizations::table.on( users_organizations::org_uuid.eq(collections::org_uuid).and( - users_organizations::user_uuid.eq(user_id.clone()) + users_organizations::user_uuid.eq(user_uuid.clone()) ) )) .left_join(users_collections::table.on( users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and( - users_collections::user_uuid.eq(user_id.clone()) + users_collections::user_uuid.eq(user_uuid.clone()) ) )) .left_join(groups_users::table.on( @@ -1007,14 +1060,32 @@ impl Cipher { collections_groups::groups_uuid.eq(groups::uuid) ) )) - .or_filter(users_collections::user_uuid.eq(user_id)) // User has access to collection + .or_filter(users_collections::user_uuid.eq(user_uuid)) // User has access to collection .or_filter(users_organizations::access_all.eq(true)) // User has access all - .or_filter(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner + .or_filter(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner .or_filter(groups::access_all.eq(true)) //Access via group .or_filter(collections_groups::collections_uuid.is_not_null()) //Access via group .select(ciphers_collections::all_columns) .distinct() - .load::<(String, String)>(conn).unwrap_or_default() + .load::<(CipherId, CollectionId)>(conn).unwrap_or_default() }} } } + +#[derive( + Clone, + Debug, + AsRef, + Deref, + DieselNewType, + Display, + From, + FromForm, + Hash, + PartialEq, + Eq, + Serialize, + Deserialize, + UuidFromParam, +)] +pub struct CipherId(String); diff --git a/src/db/models/collection.rs b/src/db/models/collection.rs index a26f22c7..abe5b400 100644 --- a/src/db/models/collection.rs +++ b/src/db/models/collection.rs @@ -1,15 +1,20 @@ +use derive_more::{AsRef, Deref, Display, From}; use serde_json::Value; -use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, 
UserOrganization}; +use super::{ + CipherId, CollectionGroup, GroupUser, Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId, + User, UserId, +}; use crate::CONFIG; +use macros::UuidFromParam; db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[diesel(table_name = collections)] #[diesel(primary_key(uuid))] pub struct Collection { - pub uuid: String, - pub org_uuid: String, + pub uuid: CollectionId, + pub org_uuid: OrganizationId, pub name: String, pub external_id: Option, } @@ -18,26 +23,27 @@ db_object! { #[diesel(table_name = users_collections)] #[diesel(primary_key(user_uuid, collection_uuid))] pub struct CollectionUser { - pub user_uuid: String, - pub collection_uuid: String, + pub user_uuid: UserId, + pub collection_uuid: CollectionId, pub read_only: bool, pub hide_passwords: bool, + pub manage: bool, } #[derive(Identifiable, Queryable, Insertable)] #[diesel(table_name = ciphers_collections)] #[diesel(primary_key(cipher_uuid, collection_uuid))] pub struct CollectionCipher { - pub cipher_uuid: String, - pub collection_uuid: String, + pub cipher_uuid: CipherId, + pub collection_uuid: CollectionId, } } /// Local methods impl Collection { - pub fn new(org_uuid: String, name: String, external_id: Option) -> Self { + pub fn new(org_uuid: OrganizationId, name: String, external_id: Option) -> Self { let mut new_model = Self { - uuid: crate::util::get_uuid(), + uuid: CollectionId(crate::util::get_uuid()), org_uuid, name, external_id: None, @@ -74,22 +80,30 @@ impl Collection { pub async fn to_json_details( &self, - user_uuid: &str, + user_uuid: &UserId, cipher_sync_data: Option<&crate::api::core::CipherSyncData>, conn: &mut DbConn, ) -> Value { - let (read_only, hide_passwords, can_manage) = if let Some(cipher_sync_data) = cipher_sync_data { - match cipher_sync_data.user_organizations.get(&self.org_uuid) { - // Only for Manager types Bitwarden returns true for the can_manage option - // Owners and Admins always have true - 
Some(uo) if uo.has_full_access() => (false, false, uo.atype >= UserOrgType::Manager), - Some(uo) => { + let (read_only, hide_passwords, manage) = if let Some(cipher_sync_data) = cipher_sync_data { + match cipher_sync_data.members.get(&self.org_uuid) { + // Only for Manager types Bitwarden returns true for the manage option + // Owners and Admins always have true. Users are not able to have full access + Some(m) if m.has_full_access() => (false, false, m.atype >= MembershipType::Manager), + Some(m) => { // Only let a manager manage collections when the have full read/write access - let is_manager = uo.atype == UserOrgType::Manager; - if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) { - (uc.read_only, uc.hide_passwords, is_manager && !uc.read_only && !uc.hide_passwords) + let is_manager = m.atype == MembershipType::Manager; + if let Some(cu) = cipher_sync_data.user_collections.get(&self.uuid) { + ( + cu.read_only, + cu.hide_passwords, + cu.manage || (is_manager && !cu.read_only && !cu.hide_passwords), + ) } else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) { - (cg.read_only, cg.hide_passwords, is_manager && !cg.read_only && !cg.hide_passwords) + ( + cg.read_only, + cg.hide_passwords, + cg.manage || (is_manager && !cg.read_only && !cg.hide_passwords), + ) } else { (false, false, false) } @@ -97,19 +111,16 @@ impl Collection { _ => (true, true, false), } } else { - match UserOrganization::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await { - Some(ou) if ou.has_full_access() => (false, false, ou.atype >= UserOrgType::Manager), - Some(ou) => { - let is_manager = ou.atype == UserOrgType::Manager; + match Membership::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await { + Some(m) if m.has_full_access() => (false, false, m.atype >= MembershipType::Manager), + Some(_) if self.is_manageable_by_user(user_uuid, conn).await => (false, false, true), + Some(m) => { + let is_manager = m.atype == 
MembershipType::Manager; let read_only = !self.is_writable_by_user(user_uuid, conn).await; let hide_passwords = self.hide_passwords_for_user(user_uuid, conn).await; (read_only, hide_passwords, is_manager && !read_only && !hide_passwords) } - _ => ( - !self.is_writable_by_user(user_uuid, conn).await, - self.hide_passwords_for_user(user_uuid, conn).await, - false, - ), + _ => (true, true, false), } }; @@ -117,17 +128,17 @@ impl Collection { json_object["object"] = json!("collectionDetails"); json_object["readOnly"] = json!(read_only); json_object["hidePasswords"] = json!(hide_passwords); - json_object["manage"] = json!(can_manage); + json_object["manage"] = json!(manage); json_object } - pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool { - org_user.has_status(UserOrgStatus::Confirmed) - && (org_user.has_full_access() - || CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await + pub async fn can_access_collection(member: &Membership, col_id: &CollectionId, conn: &mut DbConn) -> bool { + member.has_status(MembershipStatus::Confirmed) + && (member.has_full_access() + || CollectionUser::has_access_to_collection_by_user(col_id, &member.user_uuid, conn).await || (CONFIG.org_groups_enabled() - && (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await - || GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await))) + && (GroupUser::has_full_access_by_member(&member.org_uuid, &member.uuid, conn).await + || GroupUser::has_access_to_collection_by_member(col_id, &member.uuid, conn).await))) } } @@ -185,7 +196,7 @@ impl Collection { }} } - pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { for collection in Self::find_by_organization(org_uuid, conn).await { collection.delete(conn).await?; } 
@@ -193,12 +204,12 @@ impl Collection { } pub async fn update_users_revision(&self, conn: &mut DbConn) { - for user_org in UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() { - User::update_uuid_revision(&user_org.user_uuid, conn).await; + for member in Membership::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() { + User::update_uuid_revision(&member.user_uuid, conn).await; } } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &CollectionId, conn: &mut DbConn) -> Option { db_run! { conn: { collections::table .filter(collections::uuid.eq(uuid)) @@ -208,7 +219,7 @@ impl Collection { }} } - pub async fn find_by_user_uuid(user_uuid: String, conn: &mut DbConn) -> Vec { + pub async fn find_by_user_uuid(user_uuid: UserId, conn: &mut DbConn) -> Vec { if CONFIG.org_groups_enabled() { db_run! { conn: { collections::table @@ -234,7 +245,7 @@ impl Collection { ) )) .filter( - users_organizations::status.eq(UserOrgStatus::Confirmed as i32) + users_organizations::status.eq(MembershipStatus::Confirmed as i32) ) .filter( users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection @@ -265,7 +276,7 @@ impl Collection { ) )) .filter( - users_organizations::status.eq(UserOrgStatus::Confirmed as i32) + users_organizations::status.eq(MembershipStatus::Confirmed as i32) ) .filter( users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection @@ -279,15 +290,19 @@ impl Collection { } } - pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_organization_and_user_uuid( + org_uuid: &OrganizationId, + user_uuid: &UserId, + conn: &mut DbConn, + ) -> Vec { Self::find_by_user_uuid(user_uuid.to_owned(), conn) .await .into_iter() - .filter(|c| c.org_uuid == org_uuid) + .filter(|c| &c.org_uuid == org_uuid) .collect() } - pub async fn find_by_organization(org_uuid: 
&str, conn: &mut DbConn) -> Vec { + pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { db_run! { conn: { collections::table .filter(collections::org_uuid.eq(org_uuid)) @@ -297,7 +312,7 @@ impl Collection { }} } - pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { db_run! { conn: { collections::table .filter(collections::org_uuid.eq(org_uuid)) @@ -308,7 +323,11 @@ impl Collection { }} } - pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_org( + uuid: &CollectionId, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Option { db_run! { conn: { collections::table .filter(collections::uuid.eq(uuid)) @@ -320,7 +339,7 @@ impl Collection { }} } - pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: String, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_user(uuid: &CollectionId, user_uuid: UserId, conn: &mut DbConn) -> Option { if CONFIG.org_groups_enabled() { db_run! 
{ conn: { collections::table @@ -349,7 +368,7 @@ impl Collection { .filter( users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection users_organizations::access_all.eq(true).or( // access_all in Organization - users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner + users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner )).or( groups::access_all.eq(true) // access_all in groups ).or( // access via groups @@ -378,7 +397,7 @@ impl Collection { .filter( users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection users_organizations::access_all.eq(true).or( // access_all in Organization - users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner + users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner )) ).select(collections::all_columns) .first::(conn).ok() @@ -387,7 +406,7 @@ impl Collection { } } - pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn is_writable_by_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { let user_uuid = user_uuid.to_string(); if CONFIG.org_groups_enabled() { db_run! 
{ conn: { @@ -411,7 +430,7 @@ impl Collection { collections_groups::groups_uuid.eq(groups_users::groups_uuid) .and(collections_groups::collections_uuid.eq(collections::uuid)) )) - .filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner + .filter(users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner .or(users_organizations::access_all.eq(true)) // access_all via membership .or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection .and(users_collections::read_only.eq(false))) @@ -436,7 +455,7 @@ impl Collection { users_collections::collection_uuid.eq(collections::uuid) .and(users_collections::user_uuid.eq(user_uuid)) )) - .filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner + .filter(users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner .or(users_organizations::access_all.eq(true)) // access_all via membership .or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection .and(users_collections::read_only.eq(false))) @@ -449,7 +468,7 @@ impl Collection { } } - pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn hide_passwords_for_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { let user_uuid = user_uuid.to_string(); db_run! 
{ conn: { collections::table @@ -478,7 +497,7 @@ impl Collection { .filter( users_collections::collection_uuid.eq(&self.uuid).and(users_collections::hide_passwords.eq(true)).or(// Directly accessed collection users_organizations::access_all.eq(true).or( // access_all in Organization - users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner + users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner )).or( groups::access_all.eq(true) // access_all in groups ).or( // access via groups @@ -494,11 +513,61 @@ impl Collection { .unwrap_or(0) != 0 }} } + + pub async fn is_manageable_by_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { + let user_uuid = user_uuid.to_string(); + db_run! { conn: { + collections::table + .left_join(users_collections::table.on( + users_collections::collection_uuid.eq(collections::uuid).and( + users_collections::user_uuid.eq(user_uuid.clone()) + ) + )) + .left_join(users_organizations::table.on( + collections::org_uuid.eq(users_organizations::org_uuid).and( + users_organizations::user_uuid.eq(user_uuid) + ) + )) + .left_join(groups_users::table.on( + groups_users::users_organizations_uuid.eq(users_organizations::uuid) + )) + .left_join(groups::table.on( + groups::uuid.eq(groups_users::groups_uuid) + )) + .left_join(collections_groups::table.on( + collections_groups::groups_uuid.eq(groups_users::groups_uuid).and( + collections_groups::collections_uuid.eq(collections::uuid) + ) + )) + .filter(collections::uuid.eq(&self.uuid)) + .filter( + users_collections::collection_uuid.eq(&self.uuid).and(users_collections::manage.eq(true)).or(// Directly accessed collection + users_organizations::access_all.eq(true).or( // access_all in Organization + users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner + )).or( + groups::access_all.eq(true) // access_all in groups + ).or( // access via groups + groups_users::users_organizations_uuid.eq(users_organizations::uuid).and( + 
collections_groups::collections_uuid.is_not_null().and( + collections_groups::manage.eq(true)) + ) + ) + ) + .count() + .first::(conn) + .ok() + .unwrap_or(0) != 0 + }} + } } /// Database methods impl CollectionUser { - pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_organization_and_user_uuid( + org_uuid: &OrganizationId, + user_uuid: &UserId, + conn: &mut DbConn, + ) -> Vec { db_run! { conn: { users_collections::table .filter(users_collections::user_uuid.eq(user_uuid)) @@ -511,24 +580,30 @@ impl CollectionUser { }} } - pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec { - db_run! { conn: { + pub async fn find_by_organization_swap_user_uuid_with_member_uuid( + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Vec { + let col_users = db_run! { conn: { users_collections::table .inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid))) .filter(collections::org_uuid.eq(org_uuid)) .inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid))) - .select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords)) + .filter(users_organizations::org_uuid.eq(org_uuid)) + .select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords, users_collections::manage)) .load::(conn) .expect("Error loading users_collections") .from_db() - }} + }}; + col_users.into_iter().map(|c| c.into()).collect() } pub async fn save( - user_uuid: &str, - collection_uuid: &str, + user_uuid: &UserId, + collection_uuid: &CollectionId, read_only: bool, hide_passwords: bool, + manage: bool, conn: &mut DbConn, ) -> EmptyResult { User::update_uuid_revision(user_uuid, conn).await; @@ -541,6 +616,7 @@ impl CollectionUser { 
users_collections::collection_uuid.eq(collection_uuid), users_collections::read_only.eq(read_only), users_collections::hide_passwords.eq(hide_passwords), + users_collections::manage.eq(manage), )) .execute(conn) { @@ -555,6 +631,7 @@ impl CollectionUser { users_collections::collection_uuid.eq(collection_uuid), users_collections::read_only.eq(read_only), users_collections::hide_passwords.eq(hide_passwords), + users_collections::manage.eq(manage), )) .execute(conn) .map_res("Error adding user to collection") @@ -569,12 +646,14 @@ impl CollectionUser { users_collections::collection_uuid.eq(collection_uuid), users_collections::read_only.eq(read_only), users_collections::hide_passwords.eq(hide_passwords), + users_collections::manage.eq(manage), )) .on_conflict((users_collections::user_uuid, users_collections::collection_uuid)) .do_update() .set(( users_collections::read_only.eq(read_only), users_collections::hide_passwords.eq(hide_passwords), + users_collections::manage.eq(manage), )) .execute(conn) .map_res("Error adding user to collection") @@ -596,7 +675,7 @@ impl CollectionUser { }} } - pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> Vec { db_run! { conn: { users_collections::table .filter(users_collections::collection_uuid.eq(collection_uuid)) @@ -607,24 +686,27 @@ impl CollectionUser { }} } - pub async fn find_by_collection_swap_user_uuid_with_org_user_uuid( - collection_uuid: &str, + pub async fn find_by_org_and_coll_swap_user_uuid_with_member_uuid( + org_uuid: &OrganizationId, + collection_uuid: &CollectionId, conn: &mut DbConn, - ) -> Vec { - db_run! { conn: { + ) -> Vec { + let col_users = db_run! 
{ conn: { users_collections::table .filter(users_collections::collection_uuid.eq(collection_uuid)) + .filter(users_organizations::org_uuid.eq(org_uuid)) .inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid))) - .select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords)) + .select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords, users_collections::manage)) .load::(conn) .expect("Error loading users_collections") .from_db() - }} + }}; + col_users.into_iter().map(|c| c.into()).collect() } pub async fn find_by_collection_and_user( - collection_uuid: &str, - user_uuid: &str, + collection_uuid: &CollectionId, + user_uuid: &UserId, conn: &mut DbConn, ) -> Option { db_run! { conn: { @@ -638,7 +720,7 @@ impl CollectionUser { }} } - pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { users_collections::table .filter(users_collections::user_uuid.eq(user_uuid)) @@ -649,7 +731,7 @@ impl CollectionUser { }} } - pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult { for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() { User::update_uuid_revision(&collection.user_uuid, conn).await; } @@ -661,7 +743,11 @@ impl CollectionUser { }} } - pub async fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user_and_org( + user_uuid: &UserId, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> EmptyResult { let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await; db_run! 
{ conn: { @@ -677,14 +763,18 @@ impl CollectionUser { }} } - pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn has_access_to_collection_by_user( + col_id: &CollectionId, + user_uuid: &UserId, + conn: &mut DbConn, + ) -> bool { Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some() } } /// Database methods impl CollectionCipher { - pub async fn save(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn save(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult { Self::update_users_revision(collection_uuid, conn).await; db_run! { conn: @@ -714,7 +804,7 @@ impl CollectionCipher { } } - pub async fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult { Self::update_users_revision(collection_uuid, conn).await; db_run! { conn: { @@ -728,7 +818,7 @@ impl CollectionCipher { }} } - pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid))) .execute(conn) @@ -736,7 +826,7 @@ impl CollectionCipher { }} } - pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid))) .execute(conn) @@ -744,9 +834,63 @@ impl CollectionCipher { }} } - pub async fn update_users_revision(collection_uuid: &str, conn: &mut DbConn) { + pub async fn update_users_revision(collection_uuid: &CollectionId, conn: &mut DbConn) { if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await { collection.update_users_revision(conn).await; } } } + +// Added in case we need the membership_uuid instead of the user_uuid +pub struct CollectionMembership { + pub membership_uuid: MembershipId, + pub collection_uuid: CollectionId, + pub read_only: bool, + pub hide_passwords: bool, + pub manage: bool, +} + +impl CollectionMembership { + pub fn to_json_details_for_member(&self, membership_type: i32) -> Value { + json!({ + "id": self.membership_uuid, + "readOnly": self.read_only, + "hidePasswords": self.hide_passwords, + "manage": membership_type >= MembershipType::Admin + || self.manage + || (membership_type == MembershipType::Manager + && !self.read_only + && !self.hide_passwords), + }) + } +} + +impl From for CollectionMembership { + fn from(c: CollectionUser) -> Self { + Self { + membership_uuid: c.user_uuid.to_string().into(), + collection_uuid: c.collection_uuid, + read_only: c.read_only, + hide_passwords: c.hide_passwords, + manage: c.manage, + } + } +} + +#[derive( + Clone, + Debug, + AsRef, + Deref, + DieselNewType, + Display, + From, + FromForm, + Hash, + PartialEq, + Eq, + Serialize, + Deserialize, + UuidFromParam, +)] +pub struct CollectionId(String); diff --git a/src/db/models/device.rs b/src/db/models/device.rs index 8feab49d..74ef46d2 100644 --- a/src/db/models/device.rs +++ b/src/db/models/device.rs @@ -1,7 +1,10 @@ use chrono::{NaiveDateTime, Utc}; +use derive_more::{Display, From}; +use serde_json::Value; -use crate::{crypto, CONFIG}; -use core::fmt; +use super::{AuthRequest, UserId}; +use crate::{crypto, 
util::format_date, CONFIG}; +use macros::IdFromParam; db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] @@ -9,11 +12,11 @@ db_object! { #[diesel(treat_none_as_null = true)] #[diesel(primary_key(uuid, user_uuid))] pub struct Device { - pub uuid: String, + pub uuid: DeviceId, pub created_at: NaiveDateTime, pub updated_at: NaiveDateTime, - pub user_uuid: String, + pub user_uuid: UserId, pub name: String, pub atype: i32, // https://github.com/bitwarden/server/blob/dcc199bcce4aa2d5621f6fab80f1b49d8b143418/src/Core/Enums/DeviceType.cs @@ -21,14 +24,13 @@ db_object! { pub push_token: Option, pub refresh_token: String, - pub twofactor_remember: Option, } } /// Local methods impl Device { - pub fn new(uuid: String, user_uuid: String, name: String, atype: i32) -> Self { + pub fn new(uuid: DeviceId, user_uuid: UserId, name: String, atype: i32) -> Self { let now = Utc::now().naive_utc(); Self { @@ -47,6 +49,18 @@ impl Device { } } + pub fn to_json(&self) -> Value { + json!({ + "id": self.uuid, + "name": self.name, + "type": self.atype, + "identifier": self.push_uuid, + "creationDate": format_date(&self.created_at), + "isTrusted": false, + "object":"device" + }) + } + pub fn refresh_twofactor_remember(&mut self) -> String { use data_encoding::BASE64; let twofactor_remember = crypto::encode_random_bytes::<180>(BASE64); @@ -75,12 +89,12 @@ impl Device { // Also These key/value pairs are not used anywhere by either Vaultwarden or Bitwarden Clients // Because these might get used in the future, and they are added by the Bitwarden Server, lets keep it, but then commented out // --- - // fn arg: orgs: Vec, + // fn arg: members: Vec, // --- - // let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect(); - // let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect(); - // let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect(); - // let 
orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect(); + // let orgowner: Vec<_> = members.iter().filter(|m| m.atype == 0).map(|o| o.org_uuid.clone()).collect(); + // let orgadmin: Vec<_> = members.iter().filter(|m| m.atype == 1).map(|o| o.org_uuid.clone()).collect(); + // let orguser: Vec<_> = members.iter().filter(|m| m.atype == 2).map(|o| o.org_uuid.clone()).collect(); + // let orgmanager: Vec<_> = members.iter().filter(|m| m.atype == 3).map(|o| o.org_uuid.clone()).collect(); // Create the JWT claims struct, to send to the client use crate::auth::{encode_jwt, LoginJwtClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER}; @@ -123,6 +137,36 @@ impl Device { } } +pub struct DeviceWithAuthRequest { + pub device: Device, + pub pending_auth_request: Option, +} + +impl DeviceWithAuthRequest { + pub fn to_json(&self) -> Value { + let auth_request = match &self.pending_auth_request { + Some(auth_request) => auth_request.to_json_for_pending_device(), + None => Value::Null, + }; + json!({ + "id": self.device.uuid, + "name": self.device.name, + "type": self.device.atype, + "identifier": self.device.push_uuid, + "creationDate": format_date(&self.device.created_at), + "devicePendingAuthRequest": auth_request, + "isTrusted": false, + "object": "device", + }) + } + + pub fn from(c: Device, a: Option) -> Self { + Self { + device: c, + pending_auth_request: a, + } + } +} use crate::db::DbConn; use crate::api::EmptyResult; @@ -150,7 +194,7 @@ impl Device { } } - pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(devices::table.filter(devices::user_uuid.eq(user_uuid))) .execute(conn) @@ -158,7 +202,7 @@ impl Device { }} } - pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_user(uuid: &DeviceId, user_uuid: &UserId, conn: &mut DbConn) -> Option { db_run! { conn: { devices::table .filter(devices::uuid.eq(uuid)) @@ -169,7 +213,17 @@ impl Device { }} } - pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_with_auth_request_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + let devices = Self::find_by_user(user_uuid, conn).await; + let mut result = Vec::new(); + for device in devices { + let auth_request = AuthRequest::find_by_user_and_requested_device(user_uuid, &device.uuid, conn).await; + result.push(DeviceWithAuthRequest::from(device, auth_request)); + } + result + } + + pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { devices::table .filter(devices::user_uuid.eq(user_uuid)) @@ -179,7 +233,7 @@ impl Device { }} } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &DeviceId, conn: &mut DbConn) -> Option { db_run! { conn: { devices::table .filter(devices::uuid.eq(uuid)) @@ -189,7 +243,7 @@ impl Device { }} } - pub async fn clear_push_token_by_uuid(uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn clear_push_token_by_uuid(uuid: &DeviceId, conn: &mut DbConn) -> EmptyResult { db_run! { conn: { diesel::update(devices::table) .filter(devices::uuid.eq(uuid)) @@ -208,7 +262,7 @@ impl Device { }} } - pub async fn find_latest_active_by_user(user_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_latest_active_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Option { db_run! 
{ conn: { devices::table .filter(devices::user_uuid.eq(user_uuid)) @@ -219,7 +273,7 @@ impl Device { }} } - pub async fn find_push_devices_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_push_devices_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { devices::table .filter(devices::user_uuid.eq(user_uuid)) @@ -230,7 +284,7 @@ impl Device { }} } - pub async fn check_user_has_push_device(user_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn check_user_has_push_device(user_uuid: &UserId, conn: &mut DbConn) -> bool { db_run! { conn: { devices::table .filter(devices::user_uuid.eq(user_uuid)) @@ -243,68 +297,62 @@ impl Device { } } +#[derive(Display)] pub enum DeviceType { + #[display("Android")] Android = 0, + #[display("iOS")] Ios = 1, + #[display("Chrome Extension")] ChromeExtension = 2, + #[display("Firefox Extension")] FirefoxExtension = 3, + #[display("Opera Extension")] OperaExtension = 4, + #[display("Edge Extension")] EdgeExtension = 5, + #[display("Windows")] WindowsDesktop = 6, + #[display("macOS")] MacOsDesktop = 7, + #[display("Linux")] LinuxDesktop = 8, + #[display("Chrome")] ChromeBrowser = 9, + #[display("Firefox")] FirefoxBrowser = 10, + #[display("Opera")] OperaBrowser = 11, + #[display("Edge")] EdgeBrowser = 12, + #[display("Internet Explorer")] IEBrowser = 13, + #[display("Unknown Browser")] UnknownBrowser = 14, + #[display("Android")] AndroidAmazon = 15, + #[display("UWP")] Uwp = 16, + #[display("Safari")] SafariBrowser = 17, + #[display("Vivaldi")] VivaldiBrowser = 18, + #[display("Vivaldi Extension")] VivaldiExtension = 19, + #[display("Safari Extension")] SafariExtension = 20, + #[display("SDK")] Sdk = 21, + #[display("Server")] Server = 22, + #[display("Windows CLI")] WindowsCLI = 23, + #[display("macOS CLI")] MacOsCLI = 24, + #[display("Linux CLI")] LinuxCLI = 25, } -impl fmt::Display for DeviceType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - 
DeviceType::Android => write!(f, "Android"), - DeviceType::Ios => write!(f, "iOS"), - DeviceType::ChromeExtension => write!(f, "Chrome Extension"), - DeviceType::FirefoxExtension => write!(f, "Firefox Extension"), - DeviceType::OperaExtension => write!(f, "Opera Extension"), - DeviceType::EdgeExtension => write!(f, "Edge Extension"), - DeviceType::WindowsDesktop => write!(f, "Windows"), - DeviceType::MacOsDesktop => write!(f, "macOS"), - DeviceType::LinuxDesktop => write!(f, "Linux"), - DeviceType::ChromeBrowser => write!(f, "Chrome"), - DeviceType::FirefoxBrowser => write!(f, "Firefox"), - DeviceType::OperaBrowser => write!(f, "Opera"), - DeviceType::EdgeBrowser => write!(f, "Edge"), - DeviceType::IEBrowser => write!(f, "Internet Explorer"), - DeviceType::UnknownBrowser => write!(f, "Unknown Browser"), - DeviceType::AndroidAmazon => write!(f, "Android"), - DeviceType::Uwp => write!(f, "UWP"), - DeviceType::SafariBrowser => write!(f, "Safari"), - DeviceType::VivaldiBrowser => write!(f, "Vivaldi"), - DeviceType::VivaldiExtension => write!(f, "Vivaldi Extension"), - DeviceType::SafariExtension => write!(f, "Safari Extension"), - DeviceType::Sdk => write!(f, "SDK"), - DeviceType::Server => write!(f, "Server"), - DeviceType::WindowsCLI => write!(f, "Windows CLI"), - DeviceType::MacOsCLI => write!(f, "macOS CLI"), - DeviceType::LinuxCLI => write!(f, "Linux CLI"), - } - } -} - impl DeviceType { pub fn from_i32(value: i32) -> DeviceType { match value { @@ -338,3 +386,8 @@ impl DeviceType { } } } + +#[derive( + Clone, Debug, DieselNewType, Display, From, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize, IdFromParam, +)] +pub struct DeviceId(String); diff --git a/src/db/models/emergency_access.rs b/src/db/models/emergency_access.rs index f4f3b9a9..a82801f6 100644 --- a/src/db/models/emergency_access.rs +++ b/src/db/models/emergency_access.rs @@ -1,9 +1,10 @@ use chrono::{NaiveDateTime, Utc}; +use derive_more::{AsRef, Deref, Display, From}; use serde_json::Value; +use 
super::{User, UserId}; use crate::{api::EmptyResult, db::DbConn, error::MapResult}; - -use super::User; +use macros::UuidFromParam; db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] @@ -11,9 +12,9 @@ db_object! { #[diesel(treat_none_as_null = true)] #[diesel(primary_key(uuid))] pub struct EmergencyAccess { - pub uuid: String, - pub grantor_uuid: String, - pub grantee_uuid: Option, + pub uuid: EmergencyAccessId, + pub grantor_uuid: UserId, + pub grantee_uuid: Option, pub email: Option, pub key_encrypted: Option, pub atype: i32, //EmergencyAccessType @@ -29,11 +30,11 @@ db_object! { // Local methods impl EmergencyAccess { - pub fn new(grantor_uuid: String, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self { + pub fn new(grantor_uuid: UserId, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self { let now = Utc::now().naive_utc(); Self { - uuid: crate::util::get_uuid(), + uuid: EmergencyAccessId(crate::util::get_uuid()), grantor_uuid, grantee_uuid: None, email: Some(email), @@ -82,7 +83,7 @@ impl EmergencyAccess { } pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option { - let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() { + let grantee_user = if let Some(grantee_uuid) = &self.grantee_uuid { User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.") } else if let Some(email) = self.email.as_deref() { match User::find_by_mail(email, conn).await { @@ -211,7 +212,7 @@ impl EmergencyAccess { }} } - pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { for ea in Self::find_all_by_grantor_uuid(user_uuid, conn).await { ea.delete(conn).await?; } @@ -239,8 +240,8 @@ impl EmergencyAccess { } pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email( - grantor_uuid: &str, - grantee_uuid: &str, + grantor_uuid: &UserId, + grantee_uuid: 
&UserId, email: &str, conn: &mut DbConn, ) -> Option { @@ -262,7 +263,11 @@ impl EmergencyAccess { }} } - pub async fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_grantor_uuid( + uuid: &EmergencyAccessId, + grantor_uuid: &UserId, + conn: &mut DbConn, + ) -> Option { db_run! { conn: { emergency_access::table .filter(emergency_access::uuid.eq(uuid)) @@ -272,7 +277,11 @@ impl EmergencyAccess { }} } - pub async fn find_by_uuid_and_grantee_uuid(uuid: &str, grantee_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_grantee_uuid( + uuid: &EmergencyAccessId, + grantee_uuid: &UserId, + conn: &mut DbConn, + ) -> Option { db_run! { conn: { emergency_access::table .filter(emergency_access::uuid.eq(uuid)) @@ -282,7 +291,11 @@ impl EmergencyAccess { }} } - pub async fn find_by_uuid_and_grantee_email(uuid: &str, grantee_email: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_grantee_email( + uuid: &EmergencyAccessId, + grantee_email: &str, + conn: &mut DbConn, + ) -> Option { db_run! { conn: { emergency_access::table .filter(emergency_access::uuid.eq(uuid)) @@ -292,7 +305,7 @@ impl EmergencyAccess { }} } - pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_all_by_grantee_uuid(grantee_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { emergency_access::table .filter(emergency_access::grantee_uuid.eq(grantee_uuid)) @@ -319,7 +332,7 @@ impl EmergencyAccess { }} } - pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_all_by_grantor_uuid(grantor_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! 
{ conn: { emergency_access::table .filter(emergency_access::grantor_uuid.eq(grantor_uuid)) @@ -327,7 +340,12 @@ impl EmergencyAccess { }} } - pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn accept_invite( + &mut self, + grantee_uuid: &UserId, + grantee_email: &str, + conn: &mut DbConn, + ) -> EmptyResult { if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email { err!("User email does not match invite."); } @@ -337,10 +355,28 @@ impl EmergencyAccess { } self.status = EmergencyAccessStatus::Accepted as i32; - self.grantee_uuid = Some(String::from(grantee_uuid)); + self.grantee_uuid = Some(grantee_uuid.clone()); self.email = None; self.save(conn).await } } // endregion + +#[derive( + Clone, + Debug, + AsRef, + Deref, + DieselNewType, + Display, + From, + FromForm, + Hash, + PartialEq, + Eq, + Serialize, + Deserialize, + UuidFromParam, +)] +pub struct EmergencyAccessId(String); diff --git a/src/db/models/event.rs b/src/db/models/event.rs index 22d8fb00..ed4582b1 100644 --- a/src/db/models/event.rs +++ b/src/db/models/event.rs @@ -1,9 +1,9 @@ -use crate::db::DbConn; +use chrono::{NaiveDateTime, TimeDelta, Utc}; +//use derive_more::{AsRef, Deref, Display, From}; use serde_json::Value; -use crate::{api::EmptyResult, error::MapResult, CONFIG}; - -use chrono::{NaiveDateTime, TimeDelta, Utc}; +use super::{CipherId, CollectionId, GroupId, MembershipId, OrgPolicyId, OrganizationId, UserId}; +use crate::{api::EmptyResult, db::DbConn, error::MapResult, CONFIG}; // https://bitwarden.com/help/event-logs/ @@ -15,20 +15,20 @@ db_object! 
{ #[diesel(table_name = event)] #[diesel(primary_key(uuid))] pub struct Event { - pub uuid: String, + pub uuid: EventId, pub event_type: i32, // EventType - pub user_uuid: Option, - pub org_uuid: Option, - pub cipher_uuid: Option, - pub collection_uuid: Option, - pub group_uuid: Option, - pub org_user_uuid: Option, - pub act_user_uuid: Option, + pub user_uuid: Option, + pub org_uuid: Option, + pub cipher_uuid: Option, + pub collection_uuid: Option, + pub group_uuid: Option, + pub org_user_uuid: Option, + pub act_user_uuid: Option, // Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/DeviceType.cs pub device_type: Option, pub ip_address: Option, pub event_date: NaiveDateTime, - pub policy_uuid: Option, + pub policy_uuid: Option, pub provider_uuid: Option, pub provider_user_uuid: Option, pub provider_org_uuid: Option, @@ -49,6 +49,8 @@ pub enum EventType { UserClientExportedVault = 1007, // UserUpdatedTempPassword = 1008, // Not supported // UserMigratedKeyToKeyConnector = 1009, // Not supported + UserRequestedDeviceApproval = 1010, + // UserTdeOffboardingPasswordSet = 1011, // Not supported // Cipher CipherCreated = 1100, @@ -69,6 +71,7 @@ pub enum EventType { CipherSoftDeleted = 1115, CipherRestored = 1116, CipherClientToggledCardNumberVisible = 1117, + CipherClientToggledTOTPSeedVisible = 1118, // Collection CollectionCreated = 1300, @@ -94,6 +97,10 @@ pub enum EventType { // OrganizationUserFirstSsoLogin = 1510, // Not supported OrganizationUserRevoked = 1511, OrganizationUserRestored = 1512, + OrganizationUserApprovedAuthRequest = 1513, + OrganizationUserRejectedAuthRequest = 1514, + OrganizationUserDeleted = 1515, + OrganizationUserLeft = 1516, // Organization OrganizationUpdated = 1600, @@ -105,6 +112,7 @@ pub enum EventType { // OrganizationEnabledKeyConnector = 1606, // Not supported // OrganizationDisabledKeyConnector = 1607, // Not supported // OrganizationSponsorshipsSynced = 1608, // Not 
supported + // OrganizationCollectionManagementUpdated = 1609, // Not supported // Policy PolicyUpdated = 1700, @@ -117,6 +125,13 @@ pub enum EventType { // ProviderOrganizationAdded = 1901, // Not supported // ProviderOrganizationRemoved = 1902, // Not supported // ProviderOrganizationVaultAccessed = 1903, // Not supported + + // OrganizationDomainAdded = 2000, // Not supported + // OrganizationDomainRemoved = 2001, // Not supported + // OrganizationDomainVerified = 2002, // Not supported + // OrganizationDomainNotVerified = 2003, // Not supported + + // SecretRetrieved = 2100, // Not supported } /// Local methods @@ -128,7 +143,7 @@ impl Event { }; Self { - uuid: crate::util::get_uuid(), + uuid: EventId(crate::util::get_uuid()), event_type, user_uuid: None, org_uuid: None, @@ -246,7 +261,7 @@ impl Event { /// ############## /// Custom Queries pub async fn find_by_organization_uuid( - org_uuid: &str, + org_uuid: &OrganizationId, start: &NaiveDateTime, end: &NaiveDateTime, conn: &mut DbConn, @@ -263,7 +278,7 @@ impl Event { }} } - pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { db_run! { conn: { event::table .filter(event::org_uuid.eq(org_uuid)) @@ -274,16 +289,16 @@ impl Event { }} } - pub async fn find_by_org_and_user_org( - org_uuid: &str, - user_org_uuid: &str, + pub async fn find_by_org_and_member( + org_uuid: &OrganizationId, + member_uuid: &MembershipId, start: &NaiveDateTime, end: &NaiveDateTime, conn: &mut DbConn, ) -> Vec { db_run! 
{ conn: { event::table - .inner_join(users_organizations::table.on(users_organizations::uuid.eq(user_org_uuid))) + .inner_join(users_organizations::table.on(users_organizations::uuid.eq(member_uuid))) .filter(event::org_uuid.eq(org_uuid)) .filter(event::event_date.between(start, end)) .filter(event::user_uuid.eq(users_organizations::user_uuid.nullable()).or(event::act_user_uuid.eq(users_organizations::user_uuid.nullable()))) @@ -297,7 +312,7 @@ impl Event { } pub async fn find_by_cipher_uuid( - cipher_uuid: &str, + cipher_uuid: &CipherId, start: &NaiveDateTime, end: &NaiveDateTime, conn: &mut DbConn, @@ -327,3 +342,6 @@ impl Event { } } } + +#[derive(Clone, Debug, DieselNewType, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize)] +pub struct EventId(String); diff --git a/src/db/models/favorite.rs b/src/db/models/favorite.rs index a301f597..de2e0feb 100644 --- a/src/db/models/favorite.rs +++ b/src/db/models/favorite.rs @@ -1,12 +1,12 @@ -use super::User; +use super::{CipherId, User, UserId}; db_object! { #[derive(Identifiable, Queryable, Insertable)] #[diesel(table_name = favorites)] #[diesel(primary_key(user_uuid, cipher_uuid))] pub struct Favorite { - pub user_uuid: String, - pub cipher_uuid: String, + pub user_uuid: UserId, + pub cipher_uuid: CipherId, } } @@ -17,7 +17,7 @@ use crate::error::MapResult; impl Favorite { // Returns whether the specified cipher is a favorite of the specified user. - pub async fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn is_favorite(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &mut DbConn) -> bool { db_run! { conn: { let query = favorites::table .filter(favorites::cipher_uuid.eq(cipher_uuid)) @@ -29,7 +29,12 @@ impl Favorite { } // Sets whether the specified cipher is a favorite of the specified user. 
- pub async fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn set_favorite( + favorite: bool, + cipher_uuid: &CipherId, + user_uuid: &UserId, + conn: &mut DbConn, + ) -> EmptyResult { let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn).await, favorite); match (old, new) { (false, true) => { @@ -62,7 +67,7 @@ impl Favorite { } // Delete all favorite entries associated with the specified cipher. - pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(favorites::table.filter(favorites::cipher_uuid.eq(cipher_uuid))) .execute(conn) @@ -71,7 +76,7 @@ impl Favorite { } // Delete all favorite entries associated with the specified user. - pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(favorites::table.filter(favorites::user_uuid.eq(user_uuid))) .execute(conn) @@ -81,12 +86,12 @@ impl Favorite { /// Return a vec with (cipher_uuid) this will only contain favorite flagged ciphers /// This is used during a full sync so we only need one query for all favorite cipher matches. - pub async fn get_all_cipher_uuid_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn get_all_cipher_uuid_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! 
{ conn: { favorites::table .filter(favorites::user_uuid.eq(user_uuid)) .select(favorites::cipher_uuid) - .load::(conn) + .load::(conn) .unwrap_or_default() }} } diff --git a/src/db/models/folder.rs b/src/db/models/folder.rs index 5370c9dd..654ccd6d 100644 --- a/src/db/models/folder.rs +++ b/src/db/models/folder.rs @@ -1,17 +1,19 @@ use chrono::{NaiveDateTime, Utc}; +use derive_more::{AsRef, Deref, Display, From}; use serde_json::Value; -use super::User; +use super::{CipherId, User, UserId}; +use macros::UuidFromParam; db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[diesel(table_name = folders)] #[diesel(primary_key(uuid))] pub struct Folder { - pub uuid: String, + pub uuid: FolderId, pub created_at: NaiveDateTime, pub updated_at: NaiveDateTime, - pub user_uuid: String, + pub user_uuid: UserId, pub name: String, } @@ -19,18 +21,18 @@ db_object! { #[diesel(table_name = folders_ciphers)] #[diesel(primary_key(cipher_uuid, folder_uuid))] pub struct FolderCipher { - pub cipher_uuid: String, - pub folder_uuid: String, + pub cipher_uuid: CipherId, + pub folder_uuid: FolderId, } } /// Local methods impl Folder { - pub fn new(user_uuid: String, name: String) -> Self { + pub fn new(user_uuid: UserId, name: String) -> Self { let now = Utc::now().naive_utc(); Self { - uuid: crate::util::get_uuid(), + uuid: FolderId(crate::util::get_uuid()), created_at: now, updated_at: now, @@ -52,10 +54,10 @@ impl Folder { } impl FolderCipher { - pub fn new(folder_uuid: &str, cipher_uuid: &str) -> Self { + pub fn new(folder_uuid: FolderId, cipher_uuid: CipherId) -> Self { Self { - folder_uuid: folder_uuid.to_string(), - cipher_uuid: cipher_uuid.to_string(), + folder_uuid, + cipher_uuid, } } } @@ -113,24 +115,25 @@ impl Folder { }} } - pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { for folder in Self::find_by_user(user_uuid, conn).await { 
folder.delete(conn).await?; } Ok(()) } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_user(uuid: &FolderId, user_uuid: &UserId, conn: &mut DbConn) -> Option { db_run! { conn: { folders::table .filter(folders::uuid.eq(uuid)) + .filter(folders::user_uuid.eq(user_uuid)) .first::(conn) .ok() .from_db() }} } - pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { folders::table .filter(folders::user_uuid.eq(user_uuid)) @@ -176,7 +179,7 @@ impl FolderCipher { }} } - pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid))) .execute(conn) @@ -184,7 +187,7 @@ impl FolderCipher { }} } - pub async fn delete_all_by_folder(folder_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid))) .execute(conn) @@ -192,7 +195,11 @@ impl FolderCipher { }} } - pub async fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_folder_and_cipher( + folder_uuid: &FolderId, + cipher_uuid: &CipherId, + conn: &mut DbConn, + ) -> Option { db_run! { conn: { folders_ciphers::table .filter(folders_ciphers::folder_uuid.eq(folder_uuid)) @@ -203,7 +210,7 @@ impl FolderCipher { }} } - pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec { db_run! 
{ conn: { folders_ciphers::table .filter(folders_ciphers::folder_uuid.eq(folder_uuid)) @@ -215,14 +222,32 @@ impl FolderCipher { /// Return a vec with (cipher_uuid, folder_uuid) /// This is used during a full sync so we only need one query for all folder matches. - pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<(String, String)> { + pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<(CipherId, FolderId)> { db_run! { conn: { folders_ciphers::table .inner_join(folders::table) .filter(folders::user_uuid.eq(user_uuid)) .select(folders_ciphers::all_columns) - .load::<(String, String)>(conn) + .load::<(CipherId, FolderId)>(conn) .unwrap_or_default() }} } } + +#[derive( + Clone, + Debug, + AsRef, + Deref, + DieselNewType, + Display, + From, + FromForm, + Hash, + PartialEq, + Eq, + Serialize, + Deserialize, + UuidFromParam, +)] +pub struct FolderId(String); diff --git a/src/db/models/group.rs b/src/db/models/group.rs index e226512d..e85b8c05 100644 --- a/src/db/models/group.rs +++ b/src/db/models/group.rs @@ -1,8 +1,10 @@ -use super::{User, UserOrganization}; +use super::{CollectionId, Membership, MembershipId, OrganizationId, User, UserId}; use crate::api::EmptyResult; use crate::db::DbConn; use crate::error::MapResult; use chrono::{NaiveDateTime, Utc}; +use derive_more::{AsRef, Deref, Display, From}; +use macros::UuidFromParam; use serde_json::Value; db_object! { @@ -10,8 +12,8 @@ db_object! { #[diesel(table_name = groups)] #[diesel(primary_key(uuid))] pub struct Group { - pub uuid: String, - pub organizations_uuid: String, + pub uuid: GroupId, + pub organizations_uuid: OrganizationId, pub name: String, pub access_all: bool, pub external_id: Option, @@ -23,28 +25,34 @@ db_object! 
{ #[diesel(table_name = collections_groups)] #[diesel(primary_key(collections_uuid, groups_uuid))] pub struct CollectionGroup { - pub collections_uuid: String, - pub groups_uuid: String, + pub collections_uuid: CollectionId, + pub groups_uuid: GroupId, pub read_only: bool, pub hide_passwords: bool, + pub manage: bool, } #[derive(Identifiable, Queryable, Insertable)] #[diesel(table_name = groups_users)] #[diesel(primary_key(groups_uuid, users_organizations_uuid))] pub struct GroupUser { - pub groups_uuid: String, - pub users_organizations_uuid: String + pub groups_uuid: GroupId, + pub users_organizations_uuid: MembershipId } } /// Local methods impl Group { - pub fn new(organizations_uuid: String, name: String, access_all: bool, external_id: Option) -> Self { + pub fn new( + organizations_uuid: OrganizationId, + name: String, + access_all: bool, + external_id: Option, + ) -> Self { let now = Utc::now().naive_utc(); let mut new_model = Self { - uuid: crate::util::get_uuid(), + uuid: GroupId(crate::util::get_uuid()), organizations_uuid, name, access_all, @@ -74,6 +82,9 @@ impl Group { } pub async fn to_json_details(&self, conn: &mut DbConn) -> Value { + // If both read_only and hide_passwords are false, then manage should be true + // You can't have an entry with read_only and manage, or hide_passwords and manage + // Or an entry with everything to false let collections_groups: Vec = CollectionGroup::find_by_group(&self.uuid, conn) .await .iter() @@ -82,7 +93,7 @@ impl Group { "id": entry.collections_uuid, "readOnly": entry.read_only, "hidePasswords": entry.hide_passwords, - "manage": false + "manage": entry.manage, }) }) .collect(); @@ -108,18 +119,38 @@ impl Group { } impl CollectionGroup { - pub fn new(collections_uuid: String, groups_uuid: String, read_only: bool, hide_passwords: bool) -> Self { + pub fn new( + collections_uuid: CollectionId, + groups_uuid: GroupId, + read_only: bool, + hide_passwords: bool, + manage: bool, + ) -> Self { Self { collections_uuid, 
groups_uuid, read_only, hide_passwords, + manage, } } + + pub fn to_json_details_for_group(&self) -> Value { + // If both read_only and hide_passwords are false, then manage should be true + // You can't have an entry with read_only and manage, or hide_passwords and manage + // Or an entry with everything to false + // For backwards compaibility and migration proposes we keep checking read_only and hide_password + json!({ + "id": self.groups_uuid, + "readOnly": self.read_only, + "hidePasswords": self.hide_passwords, + "manage": self.manage || (!self.read_only && !self.hide_passwords), + }) + } } impl GroupUser { - pub fn new(groups_uuid: String, users_organizations_uuid: String) -> Self { + pub fn new(groups_uuid: GroupId, users_organizations_uuid: MembershipId) -> Self { Self { groups_uuid, users_organizations_uuid, @@ -163,27 +194,27 @@ impl Group { } } - pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { for group in Self::find_by_organization(org_uuid, conn).await { group.delete(conn).await?; } Ok(()) } - pub async fn find_by_organization(organizations_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { db_run! { conn: { groups::table - .filter(groups::organizations_uuid.eq(organizations_uuid)) + .filter(groups::organizations_uuid.eq(org_uuid)) .load::(conn) .expect("Error loading groups") .from_db() }} } - pub async fn count_by_org(organizations_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { db_run! 
{ conn: { groups::table - .filter(groups::organizations_uuid.eq(organizations_uuid)) + .filter(groups::organizations_uuid.eq(org_uuid)) .count() .first::(conn) .ok() @@ -191,17 +222,22 @@ impl Group { }} } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_org(uuid: &GroupId, org_uuid: &OrganizationId, conn: &mut DbConn) -> Option { db_run! { conn: { groups::table .filter(groups::uuid.eq(uuid)) + .filter(groups::organizations_uuid.eq(org_uuid)) .first::(conn) .ok() .from_db() }} } - pub async fn find_by_external_id_and_org(external_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_external_id_and_org( + external_id: &str, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Option { db_run! { conn: { groups::table .filter(groups::external_id.eq(external_id)) @@ -212,7 +248,7 @@ impl Group { }} } //Returns all organizations the user has full access to - pub async fn gather_user_organizations_full_access(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn get_orgs_by_user_with_full_access(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { groups_users::table .inner_join(users_organizations::table.on( @@ -225,12 +261,12 @@ impl Group { .filter(groups::access_all.eq(true)) .select(groups::organizations_uuid) .distinct() - .load::(conn) + .load::(conn) .expect("Error loading organization group full access information for user") }} } - pub async fn is_in_full_access_group(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn is_in_full_access_group(user_uuid: &UserId, org_uuid: &OrganizationId, conn: &mut DbConn) -> bool { db_run! 
{ conn: { groups::table .inner_join(groups_users::table.on( @@ -259,13 +295,13 @@ impl Group { }} } - pub async fn update_revision(uuid: &str, conn: &mut DbConn) { + pub async fn update_revision(uuid: &GroupId, conn: &mut DbConn) { if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await { warn!("Failed to update revision for {}: {:#?}", uuid, e); } } - async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult { + async fn _update_revision(uuid: &GroupId, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult { db_run! {conn: { crate::util::retry(|| { diesel::update(groups::table.filter(groups::uuid.eq(uuid))) @@ -292,6 +328,7 @@ impl CollectionGroup { collections_groups::groups_uuid.eq(&self.groups_uuid), collections_groups::read_only.eq(&self.read_only), collections_groups::hide_passwords.eq(&self.hide_passwords), + collections_groups::manage.eq(&self.manage), )) .execute(conn) { @@ -306,6 +343,7 @@ impl CollectionGroup { collections_groups::groups_uuid.eq(&self.groups_uuid), collections_groups::read_only.eq(&self.read_only), collections_groups::hide_passwords.eq(&self.hide_passwords), + collections_groups::manage.eq(&self.manage), )) .execute(conn) .map_res("Error adding group to collection") @@ -320,12 +358,14 @@ impl CollectionGroup { collections_groups::groups_uuid.eq(&self.groups_uuid), collections_groups::read_only.eq(self.read_only), collections_groups::hide_passwords.eq(self.hide_passwords), + collections_groups::manage.eq(self.manage), )) .on_conflict((collections_groups::collections_uuid, collections_groups::groups_uuid)) .do_update() .set(( collections_groups::read_only.eq(self.read_only), collections_groups::hide_passwords.eq(self.hide_passwords), + collections_groups::manage.eq(self.manage), )) .execute(conn) .map_res("Error adding group to collection") @@ -333,7 +373,7 @@ impl CollectionGroup { } } - pub async fn find_by_group(group_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn 
find_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> Vec { db_run! { conn: { collections_groups::table .filter(collections_groups::groups_uuid.eq(group_uuid)) @@ -343,7 +383,7 @@ impl CollectionGroup { }} } - pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { collections_groups::table .inner_join(groups_users::table.on( @@ -360,7 +400,7 @@ impl CollectionGroup { }} } - pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> Vec { db_run! { conn: { collections_groups::table .filter(collections_groups::collections_uuid.eq(collection_uuid)) @@ -386,7 +426,7 @@ impl CollectionGroup { }} } - pub async fn delete_all_by_group(group_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> EmptyResult { let group_users = GroupUser::find_by_group(group_uuid, conn).await; for group_user in group_users { group_user.update_user_revision(conn).await; @@ -400,7 +440,7 @@ impl CollectionGroup { }} } - pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult { let collection_assigned_to_groups = CollectionGroup::find_by_collection(collection_uuid, conn).await; for collection_assigned_to_group in collection_assigned_to_groups { let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, conn).await; @@ -465,7 +505,7 @@ impl GroupUser { } } - pub async fn find_by_group(group_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> Vec { db_run! 
{ conn: { groups_users::table .filter(groups_users::groups_uuid.eq(group_uuid)) @@ -475,10 +515,10 @@ impl GroupUser { }} } - pub async fn find_by_user(users_organizations_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_member(member_uuid: &MembershipId, conn: &mut DbConn) -> Vec { db_run! { conn: { groups_users::table - .filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid)) + .filter(groups_users::users_organizations_uuid.eq(member_uuid)) .load::(conn) .expect("Error loading groups for user") .from_db() @@ -486,8 +526,8 @@ impl GroupUser { } pub async fn has_access_to_collection_by_member( - collection_uuid: &str, - member_uuid: &str, + collection_uuid: &CollectionId, + member_uuid: &MembershipId, conn: &mut DbConn, ) -> bool { db_run! { conn: { @@ -503,7 +543,11 @@ impl GroupUser { }} } - pub async fn has_full_access_by_member(org_uuid: &str, member_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn has_full_access_by_member( + org_uuid: &OrganizationId, + member_uuid: &MembershipId, + conn: &mut DbConn, + ) -> bool { db_run! 
{ conn: { groups_users::table .inner_join(groups::table.on( @@ -519,32 +563,32 @@ impl GroupUser { } pub async fn update_user_revision(&self, conn: &mut DbConn) { - match UserOrganization::find_by_uuid(&self.users_organizations_uuid, conn).await { - Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await, - None => warn!("User could not be found!"), + match Membership::find_by_uuid(&self.users_organizations_uuid, conn).await { + Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await, + None => warn!("Member could not be found!"), } } - pub async fn delete_by_group_id_and_user_id( - group_uuid: &str, - users_organizations_uuid: &str, + pub async fn delete_by_group_and_member( + group_uuid: &GroupId, + member_uuid: &MembershipId, conn: &mut DbConn, ) -> EmptyResult { - match UserOrganization::find_by_uuid(users_organizations_uuid, conn).await { - Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await, - None => warn!("User could not be found!"), + match Membership::find_by_uuid(member_uuid, conn).await { + Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await, + None => warn!("Member could not be found!"), }; db_run! 
{ conn: { diesel::delete(groups_users::table) .filter(groups_users::groups_uuid.eq(group_uuid)) - .filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid)) + .filter(groups_users::users_organizations_uuid.eq(member_uuid)) .execute(conn) .map_res("Error deleting group users") }} } - pub async fn delete_all_by_group(group_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> EmptyResult { let group_users = GroupUser::find_by_group(group_uuid, conn).await; for group_user in group_users { group_user.update_user_revision(conn).await; @@ -558,17 +602,35 @@ impl GroupUser { }} } - pub async fn delete_all_by_user(users_organizations_uuid: &str, conn: &mut DbConn) -> EmptyResult { - match UserOrganization::find_by_uuid(users_organizations_uuid, conn).await { - Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await, - None => warn!("User could not be found!"), + pub async fn delete_all_by_member(member_uuid: &MembershipId, conn: &mut DbConn) -> EmptyResult { + match Membership::find_by_uuid(member_uuid, conn).await { + Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await, + None => warn!("Member could not be found!"), } db_run! 
{ conn: { diesel::delete(groups_users::table) - .filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid)) + .filter(groups_users::users_organizations_uuid.eq(member_uuid)) .execute(conn) .map_res("Error deleting user groups") }} } } + +#[derive( + Clone, + Debug, + AsRef, + Deref, + DieselNewType, + Display, + From, + FromForm, + Hash, + PartialEq, + Eq, + Serialize, + Deserialize, + UuidFromParam, +)] +pub struct GroupId(String); diff --git a/src/db/models/mod.rs b/src/db/models/mod.rs index c336cb1a..90d17313 100644 --- a/src/db/models/mod.rs +++ b/src/db/models/mod.rs @@ -16,20 +16,26 @@ mod two_factor_duo_context; mod two_factor_incomplete; mod user; -pub use self::attachment::Attachment; -pub use self::auth_request::AuthRequest; -pub use self::cipher::Cipher; -pub use self::collection::{Collection, CollectionCipher, CollectionUser}; -pub use self::device::{Device, DeviceType}; -pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType}; +pub use self::attachment::{Attachment, AttachmentId}; +pub use self::auth_request::{AuthRequest, AuthRequestId}; +pub use self::cipher::{Cipher, CipherId, RepromptType}; +pub use self::collection::{Collection, CollectionCipher, CollectionId, CollectionUser}; +pub use self::device::{Device, DeviceId, DeviceType}; +pub use self::emergency_access::{EmergencyAccess, EmergencyAccessId, EmergencyAccessStatus, EmergencyAccessType}; pub use self::event::{Event, EventType}; pub use self::favorite::Favorite; -pub use self::folder::{Folder, FolderCipher}; -pub use self::group::{CollectionGroup, Group, GroupUser}; -pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType}; -pub use self::organization::{Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization}; -pub use self::send::{Send, SendType}; +pub use self::folder::{Folder, FolderCipher, FolderId}; +pub use self::group::{CollectionGroup, Group, GroupId, GroupUser}; +pub use 
self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyId, OrgPolicyType}; +pub use self::organization::{ + Membership, MembershipId, MembershipStatus, MembershipType, OrgApiKeyId, Organization, OrganizationApiKey, + OrganizationId, +}; +pub use self::send::{ + id::{SendFileId, SendId}, + Send, SendType, +}; pub use self::two_factor::{TwoFactor, TwoFactorType}; pub use self::two_factor_duo_context::TwoFactorDuoContext; pub use self::two_factor_incomplete::TwoFactorIncomplete; -pub use self::user::{Invitation, User, UserKdfType, UserStampException}; +pub use self::user::{Invitation, User, UserId, UserKdfType, UserStampException}; diff --git a/src/db/models/org_policy.rs b/src/db/models/org_policy.rs index 23e583b4..304b3742 100644 --- a/src/db/models/org_policy.rs +++ b/src/db/models/org_policy.rs @@ -1,3 +1,4 @@ +use derive_more::{AsRef, From}; use serde::Deserialize; use serde_json::Value; @@ -5,15 +6,15 @@ use crate::api::EmptyResult; use crate::db::DbConn; use crate::error::MapResult; -use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization}; +use super::{Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId, TwoFactor, UserId}; db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[diesel(table_name = org_policies)] #[diesel(primary_key(uuid))] pub struct OrgPolicy { - pub uuid: String, - pub org_uuid: String, + pub uuid: OrgPolicyId, + pub org_uuid: OrganizationId, pub atype: i32, pub enabled: bool, pub data: String, @@ -62,9 +63,9 @@ pub enum OrgPolicyErr { /// Local methods impl OrgPolicy { - pub fn new(org_uuid: String, atype: OrgPolicyType, data: String) -> Self { + pub fn new(org_uuid: OrganizationId, atype: OrgPolicyType, data: String) -> Self { Self { - uuid: crate::util::get_uuid(), + uuid: OrgPolicyId(crate::util::get_uuid()), org_uuid, atype: atype as i32, enabled: false, @@ -142,17 +143,7 @@ impl OrgPolicy { }} } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { - db_run! 
{ conn: { - org_policies::table - .filter(org_policies::uuid.eq(uuid)) - .first::(conn) - .ok() - .from_db() - }} - } - - pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { db_run! { conn: { org_policies::table .filter(org_policies::org_uuid.eq(org_uuid)) @@ -162,7 +153,7 @@ impl OrgPolicy { }} } - pub async fn find_confirmed_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_confirmed_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { org_policies::table .inner_join( @@ -171,7 +162,7 @@ impl OrgPolicy { .and(users_organizations::user_uuid.eq(user_uuid))) ) .filter( - users_organizations::status.eq(UserOrgStatus::Confirmed as i32) + users_organizations::status.eq(MembershipStatus::Confirmed as i32) ) .select(org_policies::all_columns) .load::(conn) @@ -180,7 +171,11 @@ impl OrgPolicy { }} } - pub async fn find_by_org_and_type(org_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> Option { + pub async fn find_by_org_and_type( + org_uuid: &OrganizationId, + policy_type: OrgPolicyType, + conn: &mut DbConn, + ) -> Option { db_run! { conn: { org_policies::table .filter(org_policies::org_uuid.eq(org_uuid)) @@ -191,7 +186,7 @@ impl OrgPolicy { }} } - pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid))) .execute(conn) @@ -200,7 +195,7 @@ impl OrgPolicy { } pub async fn find_accepted_and_confirmed_by_user_and_active_policy( - user_uuid: &str, + user_uuid: &UserId, policy_type: OrgPolicyType, conn: &mut DbConn, ) -> Vec { @@ -212,10 +207,10 @@ impl OrgPolicy { .and(users_organizations::user_uuid.eq(user_uuid))) ) .filter( - users_organizations::status.eq(UserOrgStatus::Accepted as i32) + users_organizations::status.eq(MembershipStatus::Accepted as i32) ) .or_filter( - users_organizations::status.eq(UserOrgStatus::Confirmed as i32) + users_organizations::status.eq(MembershipStatus::Confirmed as i32) ) .filter(org_policies::atype.eq(policy_type as i32)) .filter(org_policies::enabled.eq(true)) @@ -227,7 +222,7 @@ impl OrgPolicy { } pub async fn find_confirmed_by_user_and_active_policy( - user_uuid: &str, + user_uuid: &UserId, policy_type: OrgPolicyType, conn: &mut DbConn, ) -> Vec { @@ -239,7 +234,7 @@ impl OrgPolicy { .and(users_organizations::user_uuid.eq(user_uuid))) ) .filter( - users_organizations::status.eq(UserOrgStatus::Confirmed as i32) + users_organizations::status.eq(MembershipStatus::Confirmed as i32) ) .filter(org_policies::atype.eq(policy_type as i32)) .filter(org_policies::enabled.eq(true)) @@ -254,21 +249,21 @@ impl OrgPolicy { /// and the user is not an owner or admin of that org. This is only useful for checking /// applicability of policy types that have these particular semantics. pub async fn is_applicable_to_user( - user_uuid: &str, + user_uuid: &UserId, policy_type: OrgPolicyType, - exclude_org_uuid: Option<&str>, + exclude_org_uuid: Option<&OrganizationId>, conn: &mut DbConn, ) -> bool { for policy in OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(user_uuid, policy_type, conn).await { // Check if we need to skip this organization. 
- if exclude_org_uuid.is_some() && exclude_org_uuid.unwrap() == policy.org_uuid { + if exclude_org_uuid.is_some() && *exclude_org_uuid.unwrap() == policy.org_uuid { continue; } - if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await { - if user.atype < UserOrgType::Admin { + if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await { + if user.atype < MembershipType::Admin { return true; } } @@ -277,8 +272,8 @@ impl OrgPolicy { } pub async fn is_user_allowed( - user_uuid: &str, - org_uuid: &str, + user_uuid: &UserId, + org_uuid: &OrganizationId, exclude_current_org: bool, conn: &mut DbConn, ) -> OrgPolicyResult { @@ -306,7 +301,7 @@ impl OrgPolicy { Ok(()) } - pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn org_is_reset_password_auto_enroll(org_uuid: &OrganizationId, conn: &mut DbConn) -> bool { match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await { Some(policy) => match serde_json::from_str::(&policy.data) { Ok(opts) => { @@ -322,12 +317,12 @@ impl OrgPolicy { /// Returns true if the user belongs to an org that has enabled the `DisableHideEmail` /// option of the `Send Options` policy, and the user is not an owner or admin of that org. 
- pub async fn is_hide_email_disabled(user_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn is_hide_email_disabled(user_uuid: &UserId, conn: &mut DbConn) -> bool { for policy in OrgPolicy::find_confirmed_by_user_and_active_policy(user_uuid, OrgPolicyType::SendOptions, conn).await { - if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await { - if user.atype < UserOrgType::Admin { + if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await { + if user.atype < MembershipType::Admin { match serde_json::from_str::(&policy.data) { Ok(opts) => { if opts.disable_hide_email { @@ -342,12 +337,19 @@ impl OrgPolicy { false } - pub async fn is_enabled_for_member(org_user_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> bool { - if let Some(membership) = UserOrganization::find_by_uuid(org_user_uuid, conn).await { - if let Some(policy) = OrgPolicy::find_by_org_and_type(&membership.org_uuid, policy_type, conn).await { + pub async fn is_enabled_for_member( + member_uuid: &MembershipId, + policy_type: OrgPolicyType, + conn: &mut DbConn, + ) -> bool { + if let Some(member) = Membership::find_by_uuid(member_uuid, conn).await { + if let Some(policy) = OrgPolicy::find_by_org_and_type(&member.org_uuid, policy_type, conn).await { return policy.enabled; } } false } } + +#[derive(Clone, Debug, AsRef, DieselNewType, From, FromForm, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct OrgPolicyId(String); diff --git a/src/db/models/organization.rs b/src/db/models/organization.rs index 15f00991..2b54e1d0 100644 --- a/src/db/models/organization.rs +++ b/src/db/models/organization.rs @@ -1,4 +1,5 @@ use chrono::{NaiveDateTime, Utc}; +use derive_more::{AsRef, Deref, Display, From}; use num_traits::FromPrimitive; use serde_json::Value; use std::{ @@ -6,16 +7,19 @@ use std::{ collections::{HashMap, HashSet}, }; -use super::{CollectionUser, Group, GroupUser, OrgPolicy, OrgPolicyType, TwoFactor, 
User}; -use crate::db::models::{Collection, CollectionGroup}; +use super::{ + CipherId, Collection, CollectionGroup, CollectionId, CollectionUser, Group, GroupId, GroupUser, OrgPolicy, + OrgPolicyType, TwoFactor, User, UserId, +}; use crate::CONFIG; +use macros::UuidFromParam; db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[diesel(table_name = organizations)] #[diesel(primary_key(uuid))] pub struct Organization { - pub uuid: String, + pub uuid: OrganizationId, pub name: String, pub billing_email: String, pub private_key: Option, @@ -25,10 +29,10 @@ db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[diesel(table_name = users_organizations)] #[diesel(primary_key(uuid))] - pub struct UserOrganization { - pub uuid: String, - pub user_uuid: String, - pub org_uuid: String, + pub struct Membership { + pub uuid: MembershipId, + pub user_uuid: UserId, + pub org_uuid: OrganizationId, pub access_all: bool, pub akey: String, @@ -42,8 +46,8 @@ db_object! { #[diesel(table_name = organization_api_key)] #[diesel(primary_key(uuid, org_uuid))] pub struct OrganizationApiKey { - pub uuid: String, - pub org_uuid: String, + pub uuid: OrgApiKeyId, + pub org_uuid: OrganizationId, pub atype: i32, pub api_key: String, pub revision_date: NaiveDateTime, @@ -51,59 +55,75 @@ db_object! { } // https://github.com/bitwarden/server/blob/b86a04cef9f1e1b82cf18e49fc94e017c641130c/src/Core/Enums/OrganizationUserStatusType.cs -pub enum UserOrgStatus { +#[derive(PartialEq)] +pub enum MembershipStatus { Revoked = -1, Invited = 0, Accepted = 1, Confirmed = 2, } +impl MembershipStatus { + pub fn from_i32(status: i32) -> Option { + match status { + 0 => Some(Self::Invited), + 1 => Some(Self::Accepted), + 2 => Some(Self::Confirmed), + // NOTE: we don't care about revoked members where this is used + // if this ever changes also adapt the OrgHeaders check. 
+ _ => None, + } + } +} + #[derive(Copy, Clone, PartialEq, Eq, num_derive::FromPrimitive)] -pub enum UserOrgType { +pub enum MembershipType { Owner = 0, Admin = 1, User = 2, Manager = 3, } -impl UserOrgType { +impl MembershipType { pub fn from_str(s: &str) -> Option { match s { - "0" | "Owner" => Some(UserOrgType::Owner), - "1" | "Admin" => Some(UserOrgType::Admin), - "2" | "User" => Some(UserOrgType::User), - "3" | "Manager" => Some(UserOrgType::Manager), + "0" | "Owner" => Some(MembershipType::Owner), + "1" | "Admin" => Some(MembershipType::Admin), + "2" | "User" => Some(MembershipType::User), + "3" | "Manager" => Some(MembershipType::Manager), + // HACK: We convert the custom role to a manager role + "4" | "Custom" => Some(MembershipType::Manager), _ => None, } } } -impl Ord for UserOrgType { - fn cmp(&self, other: &UserOrgType) -> Ordering { +impl Ord for MembershipType { + fn cmp(&self, other: &MembershipType) -> Ordering { // For easy comparison, map each variant to an access level (where 0 is lowest). 
- static ACCESS_LEVEL: [i32; 4] = [ + const ACCESS_LEVEL: [i32; 4] = [ 3, // Owner 2, // Admin 0, // User - 1, // Manager + 1, // Manager && Custom ]; ACCESS_LEVEL[*self as usize].cmp(&ACCESS_LEVEL[*other as usize]) } } -impl PartialOrd for UserOrgType { - fn partial_cmp(&self, other: &UserOrgType) -> Option { +impl PartialOrd for MembershipType { + fn partial_cmp(&self, other: &MembershipType) -> Option { Some(self.cmp(other)) } } -impl PartialEq for UserOrgType { +impl PartialEq for MembershipType { fn eq(&self, other: &i32) -> bool { *other == *self as i32 } } -impl PartialOrd for UserOrgType { +impl PartialOrd for MembershipType { fn partial_cmp(&self, other: &i32) -> Option { if let Some(other) = Self::from_i32(*other) { return Some(self.cmp(&other)); @@ -120,25 +140,25 @@ impl PartialOrd for UserOrgType { } } -impl PartialEq for i32 { - fn eq(&self, other: &UserOrgType) -> bool { +impl PartialEq for i32 { + fn eq(&self, other: &MembershipType) -> bool { *self == *other as i32 } } -impl PartialOrd for i32 { - fn partial_cmp(&self, other: &UserOrgType) -> Option { - if let Some(self_type) = UserOrgType::from_i32(*self) { +impl PartialOrd for i32 { + fn partial_cmp(&self, other: &MembershipType) -> Option { + if let Some(self_type) = MembershipType::from_i32(*self) { return Some(self_type.cmp(other)); } None } - fn lt(&self, other: &UserOrgType) -> bool { + fn lt(&self, other: &MembershipType) -> bool { matches!(self.partial_cmp(other), Some(Ordering::Less) | None) } - fn le(&self, other: &UserOrgType) -> bool { + fn le(&self, other: &MembershipType) -> bool { matches!(self.partial_cmp(other), Some(Ordering::Less | Ordering::Equal) | None) } } @@ -146,8 +166,9 @@ impl PartialOrd for i32 { /// Local methods impl Organization { pub fn new(name: String, billing_email: String, private_key: Option, public_key: Option) -> Self { + let billing_email = billing_email.to_lowercase(); Self { - uuid: crate::util::get_uuid(), + uuid: OrganizationId(crate::util::get_uuid()), 
name, billing_email, private_key, @@ -158,33 +179,46 @@ impl Organization { pub fn to_json(&self) -> Value { json!({ "id": self.uuid, - "identifier": null, // not supported by us "name": self.name, "seats": null, "maxCollections": null, "maxStorageGb": i16::MAX, // The value doesn't matter, we don't check server-side "use2fa": true, - "useCustomPermissions": false, + "useCustomPermissions": true, "useDirectory": false, // Is supported, but this value isn't checked anywhere (yet) "useEvents": CONFIG.org_events_enabled(), "useGroups": CONFIG.org_groups_enabled(), "useTotp": true, "usePolicies": true, - // "useScim": false, // Not supported (Not AGPLv3 Licensed) + "useScim": false, // Not supported (Not AGPLv3 Licensed) "useSso": false, // Not supported - // "useKeyConnector": false, // Not supported + "useKeyConnector": false, // Not supported + "usePasswordManager": true, + "useSecretsManager": false, // Not supported (Not AGPLv3 Licensed) "selfHost": true, "useApi": true, "hasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(), "useResetPassword": CONFIG.mail_enabled(), + "allowAdminAccessToAllCollectionItems": true, + "limitCollectionCreation": true, + "limitCollectionCreationDeletion": true, + "limitCollectionDeletion": true, - "businessName": null, + "businessName": self.name, "businessAddress1": null, "businessAddress2": null, "businessAddress3": null, "businessCountry": null, "businessTaxNumber": null, + "maxAutoscaleSeats": null, + "maxAutoscaleSmSeats": null, + "maxAutoscaleSmServiceAccounts": null, + + "secretsManagerPlan": null, + "smSeats": null, + "smServiceAccounts": null, + "billingEmail": self.billing_email, "planType": 6, // Custom plan "usersGetPremium": true, @@ -197,27 +231,27 @@ impl Organization { // The number 128 should be fine, it is well within the range of an i32 // The same goes for the database where we only use INTEGER (the same as an i32) // It should also provide enough room for 100+ types, which i doubt will 
ever happen. -static ACTIVATE_REVOKE_DIFF: i32 = 128; +const ACTIVATE_REVOKE_DIFF: i32 = 128; -impl UserOrganization { - pub fn new(user_uuid: String, org_uuid: String) -> Self { +impl Membership { + pub fn new(user_uuid: UserId, org_uuid: OrganizationId) -> Self { Self { - uuid: crate::util::get_uuid(), + uuid: MembershipId(crate::util::get_uuid()), user_uuid, org_uuid, access_all: false, akey: String::new(), - status: UserOrgStatus::Accepted as i32, - atype: UserOrgType::User as i32, + status: MembershipStatus::Accepted as i32, + atype: MembershipType::User as i32, reset_password_key: None, external_id: None, } } pub fn restore(&mut self) -> bool { - if self.status < UserOrgStatus::Invited as i32 { + if self.status < MembershipStatus::Invited as i32 { self.status += ACTIVATE_REVOKE_DIFF; return true; } @@ -225,7 +259,7 @@ impl UserOrganization { } pub fn revoke(&mut self) -> bool { - if self.status > UserOrgStatus::Revoked as i32 { + if self.status > MembershipStatus::Revoked as i32 { self.status -= ACTIVATE_REVOKE_DIFF; return true; } @@ -234,7 +268,7 @@ impl UserOrganization { /// Return the status of the user in an unrevoked state pub fn get_unrevoked_status(&self) -> i32 { - if self.status <= UserOrgStatus::Revoked as i32 { + if self.status <= MembershipStatus::Revoked as i32 { return self.status + ACTIVATE_REVOKE_DIFF; } self.status @@ -252,12 +286,21 @@ impl UserOrganization { } false } + + /// HACK: Convert the manager type to a custom type + /// It will be converted back on other locations + pub fn type_manager_as_custom(&self) -> i32 { + match self.atype { + 3 => 4, + _ => self.atype, + } + } } impl OrganizationApiKey { - pub fn new(org_uuid: String, api_key: String) -> Self { + pub fn new(org_uuid: OrganizationId, api_key: String) -> Self { Self { - uuid: crate::util::get_uuid(), + uuid: OrgApiKeyId(crate::util::get_uuid()), org_uuid, atype: 0, // Type 0 is the default and only type we support currently @@ -279,12 +322,12 @@ use crate::error::MapResult; 
/// Database methods impl Organization { pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { - if !email_address::EmailAddress::is_valid(self.billing_email.trim()) { - err!(format!("BillingEmail {} is not a valid email address", self.billing_email.trim())) + if !crate::util::is_valid_email(&self.billing_email) { + err!(format!("BillingEmail {} is not a valid email address", self.billing_email)) } - for user_org in UserOrganization::find_by_org(&self.uuid, conn).await.iter() { - User::update_uuid_revision(&user_org.user_uuid, conn).await; + for member in Membership::find_by_org(&self.uuid, conn).await.iter() { + User::update_uuid_revision(&member.user_uuid, conn).await; } db_run! { conn: @@ -324,7 +367,7 @@ impl Organization { Cipher::delete_all_by_organization(&self.uuid, conn).await?; Collection::delete_all_by_organization(&self.uuid, conn).await?; - UserOrganization::delete_all_by_organization(&self.uuid, conn).await?; + Membership::delete_all_by_organization(&self.uuid, conn).await?; OrgPolicy::delete_all_by_organization(&self.uuid, conn).await?; Group::delete_all_by_organization(&self.uuid, conn).await?; OrganizationApiKey::delete_all_by_organization(&self.uuid, conn).await?; @@ -336,7 +379,7 @@ impl Organization { }} } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &OrganizationId, conn: &mut DbConn) -> Option { db_run! 
{ conn: { organizations::table .filter(organizations::uuid.eq(uuid)) @@ -352,21 +395,25 @@ impl Organization { } } -impl UserOrganization { +impl Membership { pub async fn to_json(&self, conn: &mut DbConn) -> Value { let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap(); + // HACK: Convert the manager type to a custom type + // It will be converted back on other locations + let membership_type = self.type_manager_as_custom(); + let permissions = json!({ - // TODO: Add support for Custom User Roles + // TODO: Add full support for Custom User Roles // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role + // Currently we use the custom role as a manager role and link the 3 Collection roles to mimic the access_all permission "accessEventLogs": false, "accessImportExport": false, "accessReports": false, - "createNewCollections": false, - "editAnyCollection": false, - "deleteAnyCollection": false, - "editAssignedCollections": false, - "deleteAssignedCollections": false, + // If the following 3 Collection roles are set to true a custom user has access all permission + "createNewCollections": membership_type == 4 && self.access_all, + "editAnyCollection": membership_type == 4 && self.access_all, + "deleteAnyCollection": membership_type == 4 && self.access_all, "manageGroups": false, "managePolicies": false, "manageSso": false, // Not supported @@ -398,9 +445,9 @@ impl UserOrganization { "ssoBound": false, // Not supported "useSso": false, // Not supported "useKeyConnector": false, - "useSecretsManager": false, + "useSecretsManager": false, // Not supported (Not AGPLv3 Licensed) "usePasswordManager": true, - "useCustomPermissions": false, + "useCustomPermissions": true, "useActivateAutofillPolicy": false, "organizationUserId": self.uuid, @@ -417,9 +464,11 @@ impl UserOrganization { "familySponsorshipValidUntil": null, "familySponsorshipToDelete": null, "accessSecretsManager": false, - "limitCollectionCreationDeletion": false, 
// This should be set to true only when we can handle roles like createNewCollections + "limitCollectionCreation": self.atype < MembershipType::Manager, // If less then a manager return true, to limit collection creations + "limitCollectionCreationDeletion": true, + "limitCollectionDeletion": true, "allowAdminAccessToAllCollectionItems": true, - "flexibleCollections": false, + "userIsManagedByOrganization": false, // Means not managed via the Members UI, like SSO "permissions": permissions, @@ -429,7 +478,7 @@ impl UserOrganization { "userId": self.user_uuid, "key": self.akey, "status": self.status, - "type": self.atype, + "type": membership_type, "enabled": true, "object": "profileOrganization", @@ -446,16 +495,16 @@ impl UserOrganization { // Because BitWarden want the status to be -1 for revoked users we need to catch that here. // We subtract/add a number so we can restore/activate the user to it's previous state again. - let status = if self.status < UserOrgStatus::Revoked as i32 { - UserOrgStatus::Revoked as i32 + let status = if self.status < MembershipStatus::Revoked as i32 { + MembershipStatus::Revoked as i32 } else { self.status }; let twofactor_enabled = !TwoFactor::find_by_user(&user.uuid, conn).await.is_empty(); - let groups: Vec = if include_groups && CONFIG.org_groups_enabled() { - GroupUser::find_by_user(&self.uuid, conn).await.iter().map(|gu| gu.groups_uuid.clone()).collect() + let groups: Vec = if include_groups && CONFIG.org_groups_enabled() { + GroupUser::find_by_member(&self.uuid, conn).await.iter().map(|gu| gu.groups_uuid.clone()).collect() } else { // The Bitwarden clients seem to call this API regardless of whether groups are enabled, // so just act as if there are no groups. 
@@ -470,7 +519,7 @@ impl UserOrganization { // If collections are to be included, only include them if the user does not have full access via a group or defined to the user it self let collections: Vec = if include_collections && !(full_access_group || self.has_full_access()) { // Get all collections for the user here already to prevent more queries - let cu: HashMap = + let cu: HashMap = CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn) .await .into_iter() @@ -478,7 +527,7 @@ impl UserOrganization { .collect(); // Get all collection groups for this user to prevent there inclusion - let cg: HashSet = CollectionGroup::find_by_user(&self.user_uuid, conn) + let cg: HashSet = CollectionGroup::find_by_user(&self.user_uuid, conn) .await .into_iter() .map(|cg| cg.collections_uuid) @@ -488,13 +537,13 @@ impl UserOrganization { .await .into_iter() .filter_map(|c| { - let (read_only, hide_passwords, can_manage) = if self.has_full_access() { - (false, false, self.atype >= UserOrgType::Manager) + let (read_only, hide_passwords, manage) = if self.has_full_access() { + (false, false, self.atype >= MembershipType::Manager) } else if let Some(cu) = cu.get(&c.uuid) { ( cu.read_only, cu.hide_passwords, - self.atype == UserOrgType::Manager && !cu.read_only && !cu.hide_passwords, + cu.manage || (self.atype == MembershipType::Manager && !cu.read_only && !cu.hide_passwords), ) // If previous checks failed it might be that this user has access via a group, but we should not return those elements here // Those are returned via a special group endpoint @@ -508,7 +557,7 @@ impl UserOrganization { "id": c.uuid, "readOnly": read_only, "hidePasswords": hide_passwords, - "manage": can_manage, + "manage": manage, })) }) .collect() @@ -516,29 +565,39 @@ impl UserOrganization { Vec::with_capacity(0) }; - let permissions = json!({ - // TODO: Add support for Custom User Roles - // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role - 
"accessEventLogs": false, - "accessImportExport": false, - "accessReports": false, - "createNewCollections": false, - "editAnyCollection": false, - "deleteAnyCollection": false, - "editAssignedCollections": false, - "deleteAssignedCollections": false, - "manageGroups": false, - "managePolicies": false, - "manageSso": false, // Not supported - "manageUsers": false, - "manageResetPassword": false, - "manageScim": false // Not supported (Not AGPLv3 Licensed) - }); + // HACK: Convert the manager type to a custom type + // It will be converted back on other locations + let membership_type = self.type_manager_as_custom(); + + // HACK: Only return permissions if the user is of type custom and has access_all + // Else Bitwarden will assume the defaults of all false + let permissions = if membership_type == 4 && self.access_all { + json!({ + // TODO: Add full support for Custom User Roles + // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role + // Currently we use the custom role as a manager role and link the 3 Collection roles to mimic the access_all permission + "accessEventLogs": false, + "accessImportExport": false, + "accessReports": false, + // If the following 3 Collection roles are set to true a custom user has access all permission + "createNewCollections": true, + "editAnyCollection": true, + "deleteAnyCollection": true, + "manageGroups": false, + "managePolicies": false, + "manageSso": false, // Not supported + "manageUsers": false, + "manageResetPassword": false, + "manageScim": false // Not supported (Not AGPLv3 Licensed) + }) + } else { + json!(null) + }; json!({ "id": self.uuid, "userId": self.user_uuid, - "name": if self.get_unrevoked_status() >= UserOrgStatus::Accepted as i32 { Some(user.name) } else { None }, + "name": if self.get_unrevoked_status() >= MembershipStatus::Accepted as i32 { Some(user.name) } else { None }, "email": user.email, "externalId": self.external_id, "avatarColor": user.avatar_color, @@ -546,7 +605,7 @@ 
impl UserOrganization { "collections": collections, "status": status, - "type": self.atype, + "type": membership_type, "accessAll": self.access_all, "twoFactorEnabled": twofactor_enabled, "resetPasswordEnrolled": self.reset_password_key.is_some(), @@ -567,6 +626,7 @@ impl UserOrganization { "id": self.uuid, "readOnly": col_user.read_only, "hidePasswords": col_user.hide_passwords, + "manage": col_user.manage, }) } @@ -578,11 +638,12 @@ impl UserOrganization { CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn).await; collections .iter() - .map(|c| { + .map(|cu| { json!({ - "id": c.collection_uuid, - "readOnly": c.read_only, - "hidePasswords": c.hide_passwords, + "id": cu.collection_uuid, + "readOnly": cu.read_only, + "hidePasswords": cu.hide_passwords, + "manage": cu.manage, }) }) .collect() @@ -590,8 +651,8 @@ impl UserOrganization { // Because BitWarden want the status to be -1 for revoked users we need to catch that here. // We subtract/add a number so we can restore/activate the user to it's previous state again. - let status = if self.status < UserOrgStatus::Revoked as i32 { - UserOrgStatus::Revoked as i32 + let status = if self.status < MembershipStatus::Revoked as i32 { + MembershipStatus::Revoked as i32 } else { self.status }; @@ -608,13 +669,36 @@ impl UserOrganization { "object": "organizationUserDetails", }) } + + pub async fn to_json_mini_details(&self, conn: &mut DbConn) -> Value { + let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap(); + + // Because Bitwarden wants the status to be -1 for revoked users we need to catch that here. + // We subtract/add a number so we can restore/activate the user to it's previous state again. 
+ let status = if self.status < MembershipStatus::Revoked as i32 { + MembershipStatus::Revoked as i32 + } else { + self.status + }; + + json!({ + "id": self.uuid, + "userId": self.user_uuid, + "type": self.type_manager_as_custom(), // HACK: Convert the manager type to a custom type + "status": status, + "name": user.name, + "email": user.email, + "object": "organizationUserUserMiniDetails", + }) + } + pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { User::update_uuid_revision(&self.user_uuid, conn).await; db_run! { conn: sqlite, mysql { match diesel::replace_into(users_organizations::table) - .values(UserOrganizationDb::to_db(self)) + .values(MembershipDb::to_db(self)) .execute(conn) { Ok(_) => Ok(()), @@ -622,7 +706,7 @@ impl UserOrganization { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(users_organizations::table) .filter(users_organizations::uuid.eq(&self.uuid)) - .set(UserOrganizationDb::to_db(self)) + .set(MembershipDb::to_db(self)) .execute(conn) .map_res("Error adding user to organization") }, @@ -630,7 +714,7 @@ impl UserOrganization { }.map_res("Error adding user to organization") } postgresql { - let value = UserOrganizationDb::to_db(self); + let value = MembershipDb::to_db(self); diesel::insert_into(users_organizations::table) .values(&value) .on_conflict(users_organizations::uuid) @@ -646,7 +730,7 @@ impl UserOrganization { User::update_uuid_revision(&self.user_uuid, conn).await; CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, conn).await?; - GroupUser::delete_all_by_user(&self.uuid, conn).await?; + GroupUser::delete_all_by_member(&self.uuid, conn).await?; db_run! 
{ conn: { diesel::delete(users_organizations::table.filter(users_organizations::uuid.eq(self.uuid))) @@ -655,121 +739,129 @@ impl UserOrganization { }} } - pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult { - for user_org in Self::find_by_org(org_uuid, conn).await { - user_org.delete(conn).await?; + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { + for member in Self::find_by_org(org_uuid, conn).await { + member.delete(conn).await?; } Ok(()) } - pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { - for user_org in Self::find_any_state_by_user(user_uuid, conn).await { - user_org.delete(conn).await?; + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + for member in Self::find_any_state_by_user(user_uuid, conn).await { + member.delete(conn).await?; } Ok(()) } - pub async fn find_by_email_and_org(email: &str, org_id: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_email_and_org( + email: &str, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Option { if let Some(user) = User::find_by_mail(email, conn).await { - if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn).await { - return Some(user_org); + if let Some(member) = Membership::find_by_user_and_org(&user.uuid, org_uuid, conn).await { + return Some(member); } } None } - pub fn has_status(&self, status: UserOrgStatus) -> bool { + pub fn has_status(&self, status: MembershipStatus) -> bool { self.status == status as i32 } - pub fn has_type(&self, user_type: UserOrgType) -> bool { + pub fn has_type(&self, user_type: MembershipType) -> bool { self.atype == user_type as i32 } pub fn has_full_access(&self) -> bool { - (self.access_all || self.atype >= UserOrgType::Admin) && self.has_status(UserOrgStatus::Confirmed) + (self.access_all || self.atype >= MembershipType::Admin) && 
self.has_status(MembershipStatus::Confirmed) } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &MembershipId, conn: &mut DbConn) -> Option { db_run! { conn: { users_organizations::table .filter(users_organizations::uuid.eq(uuid)) - .first::(conn) + .first::(conn) .ok().from_db() }} } - pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_org( + uuid: &MembershipId, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Option { db_run! { conn: { users_organizations::table .filter(users_organizations::uuid.eq(uuid)) .filter(users_organizations::org_uuid.eq(org_uuid)) - .first::(conn) + .first::(conn) .ok().from_db() }} } - pub async fn find_confirmed_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_confirmed_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) - .filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32)) - .load::(conn) + .filter(users_organizations::status.eq(MembershipStatus::Confirmed as i32)) + .load::(conn) .unwrap_or_default().from_db() }} } - pub async fn find_invited_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_invited_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) - .filter(users_organizations::status.eq(UserOrgStatus::Invited as i32)) - .load::(conn) + .filter(users_organizations::status.eq(MembershipStatus::Invited as i32)) + .load::(conn) .unwrap_or_default().from_db() }} } - pub async fn find_any_state_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_any_state_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! 
{ conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) - .load::(conn) + .load::(conn) .unwrap_or_default().from_db() }} } - pub async fn count_accepted_and_confirmed_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn count_accepted_and_confirmed_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) - .filter(users_organizations::status.eq(UserOrgStatus::Accepted as i32).or(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))) + .filter(users_organizations::status.eq(MembershipStatus::Accepted as i32).or(users_organizations::status.eq(MembershipStatus::Confirmed as i32))) .count() .first::(conn) .unwrap_or(0) }} } - pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading user organizations").from_db() }} } - pub async fn find_confirmed_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_confirmed_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) - .filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32)) - .load::(conn) + .filter(users_organizations::status.eq(MembershipStatus::Confirmed as i32)) + .load::(conn) .unwrap_or_default().from_db() }} } - pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { db_run! 
{ conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -780,71 +872,91 @@ impl UserOrganization { }} } - pub async fn find_by_org_and_type(org_uuid: &str, atype: UserOrgType, conn: &mut DbConn) -> Vec { + pub async fn find_by_org_and_type( + org_uuid: &OrganizationId, + atype: MembershipType, + conn: &mut DbConn, + ) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) .filter(users_organizations::atype.eq(atype as i32)) - .load::(conn) + .load::(conn) .expect("Error loading user organizations").from_db() }} } - pub async fn count_confirmed_by_org_and_type(org_uuid: &str, atype: UserOrgType, conn: &mut DbConn) -> i64 { + pub async fn count_confirmed_by_org_and_type( + org_uuid: &OrganizationId, + atype: MembershipType, + conn: &mut DbConn, + ) -> i64 { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) .filter(users_organizations::atype.eq(atype as i32)) - .filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32)) + .filter(users_organizations::status.eq(MembershipStatus::Confirmed as i32)) .count() .first::(conn) .unwrap_or(0) }} } - pub async fn find_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_user_and_org( + user_uuid: &UserId, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Option { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) .filter(users_organizations::org_uuid.eq(org_uuid)) - .first::(conn) + .first::(conn) .ok().from_db() }} } - pub async fn find_confirmed_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_confirmed_by_user_and_org( + user_uuid: &UserId, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Option { db_run! 
{ conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) .filter(users_organizations::org_uuid.eq(org_uuid)) .filter( - users_organizations::status.eq(UserOrgStatus::Confirmed as i32) + users_organizations::status.eq(MembershipStatus::Confirmed as i32) ) - .first::(conn) + .first::(conn) .ok().from_db() }} } - pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading user organizations").from_db() }} } - pub async fn get_org_uuid_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn get_orgs_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) .select(users_organizations::org_uuid) - .load::(conn) + .load::(conn) .unwrap_or_default() }} } - pub async fn find_by_user_and_policy(user_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> Vec { + pub async fn find_by_user_and_policy( + user_uuid: &UserId, + policy_type: OrgPolicyType, + conn: &mut DbConn, + ) -> Vec { db_run! { conn: { users_organizations::table .inner_join( @@ -855,15 +967,19 @@ impl UserOrganization { .and(org_policies::enabled.eq(true))) ) .filter( - users_organizations::status.eq(UserOrgStatus::Confirmed as i32) + users_organizations::status.eq(MembershipStatus::Confirmed as i32) ) .select(users_organizations::all_columns) - .load::(conn) + .load::(conn) .unwrap_or_default().from_db() }} } - pub async fn find_by_cipher_and_org(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_cipher_and_org( + cipher_uuid: &CipherId, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Vec { db_run! 
{ conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -882,11 +998,15 @@ impl UserOrganization { ) .select(users_organizations::all_columns) .distinct() - .load::(conn).expect("Error loading user organizations").from_db() + .load::(conn).expect("Error loading user organizations").from_db() }} } - pub async fn find_by_cipher_and_org_with_group(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_cipher_and_org_with_group( + cipher_uuid: &CipherId, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -908,23 +1028,31 @@ impl UserOrganization { ) .select(users_organizations::all_columns) .distinct() - .load::(conn).expect("Error loading user organizations with groups").from_db() + .load::(conn).expect("Error loading user organizations with groups").from_db() }} } - pub async fn user_has_ge_admin_access_to_cipher(user_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn user_has_ge_admin_access_to_cipher( + user_uuid: &UserId, + cipher_uuid: &CipherId, + conn: &mut DbConn, + ) -> bool { db_run! { conn: { users_organizations::table .inner_join(ciphers::table.on(ciphers::uuid.eq(cipher_uuid).and(ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())))) .filter(users_organizations::user_uuid.eq(user_uuid)) - .filter(users_organizations::atype.eq_any(vec![UserOrgType::Owner as i32, UserOrgType::Admin as i32])) + .filter(users_organizations::atype.eq_any(vec![MembershipType::Owner as i32, MembershipType::Admin as i32])) .count() .first::(conn) .ok().unwrap_or(0) != 0 }} } - pub async fn find_by_collection_and_org(collection_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_collection_and_org( + collection_uuid: &CollectionId, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Vec { db_run! 
{ conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -937,18 +1065,22 @@ impl UserOrganization { ) ) .select(users_organizations::all_columns) - .load::(conn).expect("Error loading user organizations").from_db() + .load::(conn).expect("Error loading user organizations").from_db() }} } - pub async fn find_by_external_id_and_org(ext_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_external_id_and_org( + ext_id: &str, + org_uuid: &OrganizationId, + conn: &mut DbConn, + ) -> Option { db_run! {conn: { users_organizations::table .filter( users_organizations::external_id.eq(ext_id) .and(users_organizations::org_uuid.eq(org_uuid)) ) - .first::(conn).ok().from_db() + .first::(conn).ok().from_db() }} } } @@ -987,7 +1119,7 @@ impl OrganizationApiKey { } } - pub async fn find_by_org_uuid(org_uuid: &str, conn: &DbConn) -> Option { + pub async fn find_by_org_uuid(org_uuid: &OrganizationId, conn: &DbConn) -> Option { db_run! { conn: { organization_api_key::table .filter(organization_api_key::org_uuid.eq(org_uuid)) @@ -996,7 +1128,7 @@ impl OrganizationApiKey { }} } - pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(organization_api_key::table.filter(organization_api_key::org_uuid.eq(org_uuid))) .execute(conn) @@ -1005,15 +1137,56 @@ impl OrganizationApiKey { } } +#[derive( + Clone, + Debug, + AsRef, + Deref, + DieselNewType, + Display, + From, + FromForm, + Hash, + PartialEq, + Eq, + Serialize, + Deserialize, + UuidFromParam, +)] +#[deref(forward)] +#[from(forward)] +pub struct OrganizationId(String); + +#[derive( + Clone, + Debug, + Deref, + DieselNewType, + Display, + From, + FromForm, + Hash, + PartialEq, + Eq, + Serialize, + Deserialize, + UuidFromParam, +)] +pub struct MembershipId(String); + +#[derive(Clone, Debug, DieselNewType, Display, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize)] +pub struct OrgApiKeyId(String); + #[cfg(test)] mod tests { use super::*; #[test] #[allow(non_snake_case)] - fn partial_cmp_UserOrgType() { - assert!(UserOrgType::Owner > UserOrgType::Admin); - assert!(UserOrgType::Admin > UserOrgType::Manager); - assert!(UserOrgType::Manager > UserOrgType::User); + fn partial_cmp_MembershipType() { + assert!(MembershipType::Owner > MembershipType::Admin); + assert!(MembershipType::Admin > MembershipType::Manager); + assert!(MembershipType::Manager > MembershipType::User); + assert!(MembershipType::Manager == MembershipType::from_str("4").unwrap()); } } diff --git a/src/db/models/send.rs b/src/db/models/send.rs index 36944281..c0bb0b33 100644 --- a/src/db/models/send.rs +++ b/src/db/models/send.rs @@ -3,7 +3,8 @@ use serde_json::Value; use crate::util::LowerCase; -use super::User; +use super::{OrganizationId, User, UserId}; +use id::SendId; db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] @@ -11,11 +12,10 @@ db_object! 
{ #[diesel(treat_none_as_null = true)] #[diesel(primary_key(uuid))] pub struct Send { - pub uuid: String, - - pub user_uuid: Option, - pub organization_uuid: Option, + pub uuid: SendId, + pub user_uuid: Option, + pub organization_uuid: Option, pub name: String, pub notes: Option, @@ -51,7 +51,7 @@ impl Send { let now = Utc::now().naive_utc(); Self { - uuid: crate::util::get_uuid(), + uuid: SendId::from(crate::util::get_uuid()), user_uuid: None, organization_uuid: None, @@ -243,7 +243,7 @@ impl Send { } } - pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec { + pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec { let mut user_uuids = Vec::new(); match &self.user_uuid { Some(user_uuid) => { @@ -257,7 +257,7 @@ impl Send { user_uuids } - pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { for send in Self::find_by_user(user_uuid, conn).await { send.delete(conn).await?; } @@ -268,20 +268,19 @@ impl Send { use data_encoding::BASE64URL_NOPAD; use uuid::Uuid; - let uuid_vec = match BASE64URL_NOPAD.decode(access_id.as_bytes()) { - Ok(v) => v, - Err(_) => return None, + let Ok(uuid_vec) = BASE64URL_NOPAD.decode(access_id.as_bytes()) else { + return None; }; let uuid = match Uuid::from_slice(&uuid_vec) { - Ok(u) => u.to_string(), + Ok(u) => SendId::from(u.to_string()), Err(_) => return None, }; Self::find_by_uuid(&uuid, conn).await } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &SendId, conn: &mut DbConn) -> Option { db_run! {conn: { sends::table .filter(sends::uuid.eq(uuid)) @@ -291,7 +290,18 @@ impl Send { }} } - pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_uuid_and_user(uuid: &SendId, user_uuid: &UserId, conn: &mut DbConn) -> Option { + db_run! 
{conn: { + sends::table + .filter(sends::uuid.eq(uuid)) + .filter(sends::user_uuid.eq(user_uuid)) + .first::(conn) + .ok() + .from_db() + }} + } + + pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! {conn: { sends::table .filter(sends::user_uuid.eq(user_uuid)) @@ -299,7 +309,7 @@ impl Send { }} } - pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn size_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Option { let sends = Self::find_by_user(user_uuid, conn).await; #[derive(serde::Deserialize)] @@ -322,7 +332,7 @@ impl Send { Some(total) } - pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { db_run! {conn: { sends::table .filter(sends::organization_uuid.eq(org_uuid)) @@ -339,3 +349,48 @@ impl Send { }} } } + +// separate namespace to avoid name collision with std::marker::Send +pub mod id { + use derive_more::{AsRef, Deref, Display, From}; + use macros::{IdFromParam, UuidFromParam}; + use std::marker::Send; + use std::path::Path; + + #[derive( + Clone, + Debug, + AsRef, + Deref, + DieselNewType, + Display, + From, + FromForm, + Hash, + PartialEq, + Eq, + Serialize, + Deserialize, + UuidFromParam, + )] + pub struct SendId(String); + + impl AsRef for SendId { + #[inline] + fn as_ref(&self) -> &Path { + Path::new(&self.0) + } + } + + #[derive( + Clone, Debug, AsRef, Deref, Display, From, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize, IdFromParam, + )] + pub struct SendFileId(String); + + impl AsRef for SendFileId { + #[inline] + fn as_ref(&self) -> &Path { + Path::new(&self.0) + } + } +} diff --git a/src/db/models/two_factor.rs b/src/db/models/two_factor.rs index 9155c518..0f5a5de5 100644 --- a/src/db/models/two_factor.rs +++ b/src/db/models/two_factor.rs @@ -1,5 +1,6 @@ use serde_json::Value; +use super::UserId; use crate::{api::EmptyResult, db::DbConn, error::MapResult}; db_object! 
{ @@ -7,8 +8,8 @@ db_object! { #[diesel(table_name = twofactor)] #[diesel(primary_key(uuid))] pub struct TwoFactor { - pub uuid: String, - pub user_uuid: String, + pub uuid: TwoFactorId, + pub user_uuid: UserId, pub atype: i32, pub enabled: bool, pub data: String, @@ -41,9 +42,9 @@ pub enum TwoFactorType { /// Local methods impl TwoFactor { - pub fn new(user_uuid: String, atype: TwoFactorType, data: String) -> Self { + pub fn new(user_uuid: UserId, atype: TwoFactorType, data: String) -> Self { Self { - uuid: crate::util::get_uuid(), + uuid: TwoFactorId(crate::util::get_uuid()), user_uuid, atype: atype as i32, enabled: true, @@ -118,7 +119,7 @@ impl TwoFactor { }} } - pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { db_run! { conn: { twofactor::table .filter(twofactor::user_uuid.eq(user_uuid)) @@ -129,7 +130,7 @@ impl TwoFactor { }} } - pub async fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &mut DbConn) -> Option { + pub async fn find_by_user_and_type(user_uuid: &UserId, atype: i32, conn: &mut DbConn) -> Option { db_run! { conn: { twofactor::table .filter(twofactor::user_uuid.eq(user_uuid)) @@ -140,7 +141,7 @@ impl TwoFactor { }} } - pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid))) .execute(conn) @@ -217,3 +218,6 @@ impl TwoFactor { Ok(()) } } + +#[derive(Clone, Debug, DieselNewType, FromForm, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct TwoFactorId(String); diff --git a/src/db/models/two_factor_incomplete.rs b/src/db/models/two_factor_incomplete.rs index 12813eb5..b8dc4ad7 100644 --- a/src/db/models/two_factor_incomplete.rs +++ b/src/db/models/two_factor_incomplete.rs @@ -1,17 +1,26 @@ use chrono::{NaiveDateTime, Utc}; -use crate::{api::EmptyResult, auth::ClientIp, db::DbConn, error::MapResult, CONFIG}; +use crate::{ + api::EmptyResult, + auth::ClientIp, + db::{ + models::{DeviceId, UserId}, + DbConn, + }, + error::MapResult, + CONFIG, +}; db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[diesel(table_name = twofactor_incomplete)] #[diesel(primary_key(user_uuid, device_uuid))] pub struct TwoFactorIncomplete { - pub user_uuid: String, + pub user_uuid: UserId, // This device UUID is simply what's claimed by the device. It doesn't // necessarily correspond to any UUID in the devices table, since a device // must complete 2FA login before being added into the devices table. - pub device_uuid: String, + pub device_uuid: DeviceId, pub device_name: String, pub device_type: i32, pub login_time: NaiveDateTime, @@ -21,8 +30,8 @@ db_object! 
{ impl TwoFactorIncomplete { pub async fn mark_incomplete( - user_uuid: &str, - device_uuid: &str, + user_uuid: &UserId, + device_uuid: &DeviceId, device_name: &str, device_type: i32, ip: &ClientIp, @@ -55,7 +64,7 @@ impl TwoFactorIncomplete { }} } - pub async fn mark_complete(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn mark_complete(user_uuid: &UserId, device_uuid: &DeviceId, conn: &mut DbConn) -> EmptyResult { if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() { return Ok(()); } @@ -63,7 +72,11 @@ impl TwoFactorIncomplete { Self::delete_by_user_and_device(user_uuid, device_uuid, conn).await } - pub async fn find_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_user_and_device( + user_uuid: &UserId, + device_uuid: &DeviceId, + conn: &mut DbConn, + ) -> Option { db_run! { conn: { twofactor_incomplete::table .filter(twofactor_incomplete::user_uuid.eq(user_uuid)) @@ -88,7 +101,11 @@ impl TwoFactorIncomplete { Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn).await } - pub async fn delete_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_by_user_and_device( + user_uuid: &UserId, + device_uuid: &DeviceId, + conn: &mut DbConn, + ) -> EmptyResult { db_run! { conn: { diesel::delete(twofactor_incomplete::table .filter(twofactor_incomplete::user_uuid.eq(user_uuid)) @@ -98,7 +115,7 @@ impl TwoFactorIncomplete { }} } - pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid))) .execute(conn) diff --git a/src/db/models/user.rs b/src/db/models/user.rs index 94f42c84..8978fc5a 100644 --- a/src/db/models/user.rs +++ b/src/db/models/user.rs @@ -1,9 +1,19 @@ -use crate::util::{format_date, get_uuid, retry}; use chrono::{NaiveDateTime, TimeDelta, Utc}; +use derive_more::{AsRef, Deref, Display, From}; use serde_json::Value; -use crate::crypto; -use crate::CONFIG; +use super::{ + Cipher, Device, EmergencyAccess, Favorite, Folder, Membership, MembershipType, TwoFactor, TwoFactorIncomplete, +}; +use crate::{ + api::EmptyResult, + crypto, + db::DbConn, + error::MapResult, + util::{format_date, get_uuid, retry}, + CONFIG, +}; +use macros::UuidFromParam; db_object! { #[derive(Identifiable, Queryable, Insertable, AsChangeset)] @@ -11,7 +21,7 @@ db_object! { #[diesel(treat_none_as_null = true)] #[diesel(primary_key(uuid))] pub struct User { - pub uuid: String, + pub uuid: UserId, pub enabled: bool, pub created_at: NaiveDateTime, pub updated_at: NaiveDateTime, @@ -91,7 +101,7 @@ impl User { let email = email.to_lowercase(); Self { - uuid: get_uuid(), + uuid: UserId(get_uuid()), enabled: true, created_at: now, updated_at: now, @@ -214,20 +224,11 @@ impl User { } } -use super::{ - Cipher, Device, EmergencyAccess, Favorite, Folder, Send, TwoFactor, TwoFactorIncomplete, UserOrgType, - UserOrganization, -}; -use crate::db::DbConn; - -use crate::api::EmptyResult; -use crate::error::MapResult; - /// Database methods impl User { pub async fn to_json(&self, conn: &mut DbConn) -> Value { let mut orgs_json = Vec::new(); - for c in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await { + for c in Membership::find_confirmed_by_user(&self.uuid, conn).await { orgs_json.push(c.to_json(conn).await); } @@ -266,8 +267,8 @@ impl User { } pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult { - if self.email.trim().is_empty() { - err!("User email 
can't be empty") + if !crate::util::is_valid_email(&self.email) { + err!(format!("User email {} is not a valid email address", self.email)) } self.updated_at = Utc::now().naive_utc(); @@ -304,19 +305,18 @@ impl User { } pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { - for user_org in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await { - if user_org.atype == UserOrgType::Owner - && UserOrganization::count_confirmed_by_org_and_type(&user_org.org_uuid, UserOrgType::Owner, conn).await - <= 1 + for member in Membership::find_confirmed_by_user(&self.uuid, conn).await { + if member.atype == MembershipType::Owner + && Membership::count_confirmed_by_org_and_type(&member.org_uuid, MembershipType::Owner, conn).await <= 1 { err!("Can't delete last owner") } } - Send::delete_all_by_user(&self.uuid, conn).await?; + super::Send::delete_all_by_user(&self.uuid, conn).await?; EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?; EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?; - UserOrganization::delete_all_by_user(&self.uuid, conn).await?; + Membership::delete_all_by_user(&self.uuid, conn).await?; Cipher::delete_all_by_user(&self.uuid, conn).await?; Favorite::delete_all_by_user(&self.uuid, conn).await?; Folder::delete_all_by_user(&self.uuid, conn).await?; @@ -332,7 +332,7 @@ impl User { }} } - pub async fn update_uuid_revision(uuid: &str, conn: &mut DbConn) { + pub async fn update_uuid_revision(uuid: &UserId, conn: &mut DbConn) { if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await { warn!("Failed to update revision for {}: {:#?}", uuid, e); } @@ -357,7 +357,7 @@ impl User { Self::_update_revision(&self.uuid, &self.updated_at, conn).await } - async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult { + async fn _update_revision(uuid: &UserId, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult { db_run! 
{conn: { retry(|| { diesel::update(users::table.filter(users::uuid.eq(uuid))) @@ -379,7 +379,7 @@ impl User { }} } - pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &UserId, conn: &mut DbConn) -> Option { db_run! {conn: { users::table.filter(users::uuid.eq(uuid)).first::(conn).ok().from_db() }} @@ -408,8 +408,8 @@ impl Invitation { } pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { - if self.email.trim().is_empty() { - err!("Invitation email can't be empty") + if !crate::util::is_valid_email(&self.email) { + err!(format!("Invitation email {} is not a valid email address", self.email)) } db_run! {conn: @@ -458,3 +458,23 @@ impl Invitation { } } } + +#[derive( + Clone, + Debug, + DieselNewType, + FromForm, + PartialEq, + Eq, + Hash, + Serialize, + Deserialize, + AsRef, + Deref, + Display, + From, + UuidFromParam, +)] +#[deref(forward)] +#[from(forward)] +pub struct UserId(String); diff --git a/src/db/schemas/mysql/schema.rs b/src/db/schemas/mysql/schema.rs index fa84ed05..573e4503 100644 --- a/src/db/schemas/mysql/schema.rs +++ b/src/db/schemas/mysql/schema.rs @@ -226,6 +226,7 @@ table! { collection_uuid -> Text, read_only -> Bool, hide_passwords -> Bool, + manage -> Bool, } } @@ -295,6 +296,7 @@ table! { groups_uuid -> Text, read_only -> Bool, hide_passwords -> Bool, + manage -> Bool, } } diff --git a/src/db/schemas/postgresql/schema.rs b/src/db/schemas/postgresql/schema.rs index d1ea4b02..a3707adf 100644 --- a/src/db/schemas/postgresql/schema.rs +++ b/src/db/schemas/postgresql/schema.rs @@ -226,6 +226,7 @@ table! { collection_uuid -> Text, read_only -> Bool, hide_passwords -> Bool, + manage -> Bool, } } @@ -295,6 +296,7 @@ table! 
{ groups_uuid -> Text, read_only -> Bool, hide_passwords -> Bool, + manage -> Bool, } } diff --git a/src/db/schemas/sqlite/schema.rs b/src/db/schemas/sqlite/schema.rs index d1ea4b02..a3707adf 100644 --- a/src/db/schemas/sqlite/schema.rs +++ b/src/db/schemas/sqlite/schema.rs @@ -226,6 +226,7 @@ table! { collection_uuid -> Text, read_only -> Bool, hide_passwords -> Bool, + manage -> Bool, } } @@ -295,6 +296,7 @@ table! { groups_uuid -> Text, read_only -> Bool, hide_passwords -> Bool, + manage -> Bool, } } diff --git a/src/mail.rs b/src/mail.rs index 1754400b..7a4deaec 100644 --- a/src/mail.rs +++ b/src/mail.rs @@ -1,7 +1,6 @@ -use std::str::FromStr; - use chrono::NaiveDateTime; use percent_encoding::{percent_encode, NON_ALPHANUMERIC}; +use std::{env::consts::EXE_SUFFIX, str::FromStr}; use lettre::{ message::{Attachment, Body, Mailbox, Message, MultiPart, SinglePart}, @@ -17,7 +16,7 @@ use crate::{ encode_jwt, generate_delete_claims, generate_emergency_access_invite_claims, generate_invite_claims, generate_verify_email_claims, }, - db::models::{Device, DeviceType, User}, + db::models::{Device, DeviceType, EmergencyAccessId, MembershipId, OrganizationId, User, UserId}, error::Error, CONFIG, }; @@ -26,7 +25,7 @@ fn sendmail_transport() -> AsyncSendmailTransport { if let Some(command) = CONFIG.sendmail_command() { AsyncSendmailTransport::new_with_command(command) } else { - AsyncSendmailTransport::new() + AsyncSendmailTransport::new_with_command(format!("sendmail{EXE_SUFFIX}")) } } @@ -166,8 +165,8 @@ pub async fn send_password_hint(address: &str, hint: Option) -> EmptyRes send_email(address, &subject, body_html, body_text).await } -pub async fn send_delete_account(address: &str, uuid: &str) -> EmptyResult { - let claims = generate_delete_claims(uuid.to_string()); +pub async fn send_delete_account(address: &str, user_id: &UserId) -> EmptyResult { + let claims = generate_delete_claims(user_id.to_string()); let delete_token = encode_jwt(&claims); let (subject, body_html, 
body_text) = get_text( @@ -175,7 +174,7 @@ pub async fn send_delete_account(address: &str, uuid: &str) -> EmptyResult { json!({ "url": CONFIG.domain(), "img_src": CONFIG._smtp_img_src(), - "user_id": uuid, + "user_id": user_id, "email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(), "token": delete_token, }), @@ -184,8 +183,8 @@ pub async fn send_delete_account(address: &str, uuid: &str) -> EmptyResult { send_email(address, &subject, body_html, body_text).await } -pub async fn send_verify_email(address: &str, uuid: &str) -> EmptyResult { - let claims = generate_verify_email_claims(uuid.to_string()); +pub async fn send_verify_email(address: &str, user_id: &UserId) -> EmptyResult { + let claims = generate_verify_email_claims(user_id.clone()); let verify_email_token = encode_jwt(&claims); let (subject, body_html, body_text) = get_text( @@ -193,7 +192,7 @@ pub async fn send_verify_email(address: &str, uuid: &str) -> EmptyResult { json!({ "url": CONFIG.domain(), "img_src": CONFIG._smtp_img_src(), - "user_id": uuid, + "user_id": user_id, "email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(), "token": verify_email_token, }), @@ -236,8 +235,8 @@ pub async fn send_welcome(address: &str) -> EmptyResult { send_email(address, &subject, body_html, body_text).await } -pub async fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult { - let claims = generate_verify_email_claims(uuid.to_string()); +pub async fn send_welcome_must_verify(address: &str, user_id: &UserId) -> EmptyResult { + let claims = generate_verify_email_claims(user_id.clone()); let verify_email_token = encode_jwt(&claims); let (subject, body_html, body_text) = get_text( @@ -245,7 +244,7 @@ pub async fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult json!({ "url": CONFIG.domain(), "img_src": CONFIG._smtp_img_src(), - "user_id": uuid, + "user_id": user_id, "token": verify_email_token, }), )?; @@ -281,8 +280,8 @@ pub async fn 
send_single_org_removed_from_org(address: &str, org_name: &str) -> pub async fn send_invite( user: &User, - org_id: Option, - org_user_id: Option, + org_id: OrganizationId, + member_id: MembershipId, org_name: &str, invited_by_email: Option, ) -> EmptyResult { @@ -290,7 +289,7 @@ pub async fn send_invite( user.uuid.clone(), user.email.clone(), org_id.clone(), - org_user_id.clone(), + member_id.clone(), invited_by_email, ); let invite_token = encode_jwt(&claims); @@ -300,17 +299,16 @@ pub async fn send_invite( query_params .append_pair("email", &user.email) .append_pair("organizationName", org_name) - .append_pair("organizationId", org_id.as_deref().unwrap_or("_")) - .append_pair("organizationUserId", org_user_id.as_deref().unwrap_or("_")) + .append_pair("organizationId", &org_id) + .append_pair("organizationUserId", &member_id) .append_pair("token", &invite_token); if user.private_key.is_some() { query_params.append_pair("orgUserHasExistingUser", "true"); } } - let query_string = match query.query() { - None => err!("Failed to build invite URL query parameters"), - Some(query) => query, + let Some(query_string) = query.query() else { + err!("Failed to build invite URL query parameters") }; let (subject, body_html, body_text) = get_text( @@ -328,15 +326,15 @@ pub async fn send_invite( pub async fn send_emergency_access_invite( address: &str, - uuid: &str, - emer_id: &str, + user_id: UserId, + emer_id: EmergencyAccessId, grantor_name: &str, grantor_email: &str, ) -> EmptyResult { let claims = generate_emergency_access_invite_claims( - String::from(uuid), + user_id, String::from(address), - String::from(emer_id), + emer_id.clone(), String::from(grantor_name), String::from(grantor_email), ); @@ -346,15 +344,14 @@ pub async fn send_emergency_access_invite( { let mut query_params = query.query_pairs_mut(); query_params - .append_pair("id", emer_id) + .append_pair("id", &emer_id.to_string()) .append_pair("name", grantor_name) .append_pair("email", address) 
.append_pair("token", &encode_jwt(&claims)); } - let query_string = match query.query() { - None => err!("Failed to build emergency invite URL query parameters"), - Some(query) => query, + let Some(query_string) = query.query() else { + err!("Failed to build emergency invite URL query parameters") }; let (subject, body_html, body_text) = get_text( @@ -619,13 +616,13 @@ async fn send_with_selected_transport(email: Message) -> EmptyResult { // Match some common errors and make them more user friendly Err(e) => { if e.is_client() { - debug!("Sendmail client error: {:#?}", e); + debug!("Sendmail client error: {:?}", e); err!(format!("Sendmail client error: {e}")); } else if e.is_response() { - debug!("Sendmail response error: {:#?}", e); + debug!("Sendmail response error: {:?}", e); err!(format!("Sendmail response error: {e}")); } else { - debug!("Sendmail error: {:#?}", e); + debug!("Sendmail error: {:?}", e); err!(format!("Sendmail error: {e}")); } } diff --git a/src/main.rs b/src/main.rs index 7e180e2e..530c7b2c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,6 +24,8 @@ extern crate log; extern crate diesel; #[macro_use] extern crate diesel_migrations; +#[macro_use] +extern crate diesel_derive_newtype; use std::{ collections::HashMap, @@ -67,7 +69,7 @@ pub use util::is_running_in_container; #[rocket::main] async fn main() -> Result<(), Error> { - parse_args(); + parse_args().await; launch_info(); let level = init_logging()?; @@ -115,7 +117,7 @@ PRESETS: m= t= p= pub const VERSION: Option<&str> = option_env!("VW_VERSION"); -fn parse_args() { +async fn parse_args() { let mut pargs = pico_args::Arguments::from_env(); let version = VERSION.unwrap_or("(Version info from Git not present)"); @@ -186,7 +188,7 @@ fn parse_args() { exit(1); } } else if command == "backup" { - match backup_sqlite() { + match backup_sqlite().await { Ok(f) => { println!("Backup to '{f}' was successful"); exit(0); @@ -201,25 +203,20 @@ fn parse_args() { } } -fn backup_sqlite() -> Result { - 
#[cfg(sqlite)] - { - use crate::db::{backup_sqlite_database, DbConnType}; - if DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false) { - use diesel::Connection; - let url = CONFIG.database_url(); +async fn backup_sqlite() -> Result { + use crate::db::{backup_database, DbConnType}; + if DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false) { + // Establish a connection to the sqlite database + let mut conn = db::DbPool::from_config() + .expect("SQLite database connection failed") + .get() + .await + .expect("Unable to get SQLite db pool"); - // Establish a connection to the sqlite database - let mut conn = diesel::sqlite::SqliteConnection::establish(&url)?; - let backup_file = backup_sqlite_database(&mut conn)?; - Ok(backup_file) - } else { - err_silent!("The database type is not SQLite. Backups only works for SQLite databases") - } - } - #[cfg(not(sqlite))] - { - err_silent!("The 'sqlite' feature is not enabled. Backups only works for SQLite databases") + let backup_file = backup_database(&mut conn).await?; + Ok(backup_file) + } else { + err_silent!("The database type is not SQLite. Backups only works for SQLite databases") } } @@ -610,7 +607,7 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error> // If we need more signals to act upon, we might want to use select! here. // With only one item to listen for this is enough. let _ = signal_user1.recv().await; - match backup_sqlite() { + match backup_sqlite().await { Ok(f) => info!("Backup to '{f}' was successful"), Err(e) => error!("Backup failed. 
{e:?}"), } diff --git a/src/static/scripts/admin.css b/src/static/scripts/admin.css index 1db8d4c0..ee035ac4 100644 --- a/src/static/scripts/admin.css +++ b/src/static/scripts/admin.css @@ -38,8 +38,8 @@ img { max-width: 130px; } #users-table .vw-actions, #orgs-table .vw-actions { - min-width: 130px; - max-width: 130px; + min-width: 135px; + max-width: 140px; } #users-table .vw-org-cell { max-height: 120px; diff --git a/src/static/scripts/admin_diagnostics.js b/src/static/scripts/admin_diagnostics.js index 6a178e4b..566e6a56 100644 --- a/src/static/scripts/admin_diagnostics.js +++ b/src/static/scripts/admin_diagnostics.js @@ -7,6 +7,8 @@ var timeCheck = false; var ntpTimeCheck = false; var domainCheck = false; var httpsCheck = false; +var websocketCheck = false; +var httpResponseCheck = false; // ================================ // Date & Time Check @@ -76,18 +78,15 @@ async function generateSupportString(event, dj) { event.preventDefault(); event.stopPropagation(); - let supportString = "### Your environment (Generated via diagnostics page)\n"; + let supportString = "### Your environment (Generated via diagnostics page)\n\n"; supportString += `* Vaultwarden version: v${dj.current_release}\n`; supportString += `* Web-vault version: v${dj.web_vault_version}\n`; supportString += `* OS/Arch: ${dj.host_os}/${dj.host_arch}\n`; supportString += `* Running within a container: ${dj.running_within_container} (Base: ${dj.container_base_image})\n`; - supportString += "* Environment settings overridden: "; - if (dj.overrides != "") { - supportString += "true\n"; - } else { - supportString += "false\n"; - } + supportString += `* Database type: ${dj.db_type}\n`; + supportString += `* Database version: ${dj.db_version}\n`; + supportString += `* Environment settings overridden!: ${dj.overrides !== ""}\n`; supportString += `* Uses a reverse proxy: ${dj.ip_header_exists}\n`; if (dj.ip_header_exists) { supportString += `* IP Header check: ${dj.ip_header_match} 
(${dj.ip_header_name})\n`; @@ -99,11 +98,12 @@ async function generateSupportString(event, dj) { supportString += `* Server/NTP Time Check: ${ntpTimeCheck}\n`; supportString += `* Domain Configuration Check: ${domainCheck}\n`; supportString += `* HTTPS Check: ${httpsCheck}\n`; - supportString += `* Database type: ${dj.db_type}\n`; - supportString += `* Database version: ${dj.db_version}\n`; - supportString += "* Clients used: \n"; - supportString += "* Reverse proxy and version: \n"; - supportString += "* Other relevant information: \n"; + if (dj.enable_websocket) { + supportString += `* Websocket Check: ${websocketCheck}\n`; + } else { + supportString += "* Websocket Check: disabled\n"; + } + supportString += `* HTTP Response Checks: ${httpResponseCheck}\n`; const jsonResponse = await fetch(`${BASE_URL}/admin/diagnostics/config`, { "headers": { "Accept": "application/json" } @@ -113,10 +113,30 @@ async function generateSupportString(event, dj) { throw new Error(jsonResponse); } const configJson = await jsonResponse.json(); - supportString += "\n### Config (Generated via diagnostics page)\n
Show Running Config\n"; - supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`; - supportString += "\n\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n
\n"; + // Start Config and Details section within a details block which is collapsed by default + supportString += "\n### Config & Details (Generated via diagnostics page)\n\n"; + supportString += "
Show Config & Details\n"; + + // Add overrides if they exists + if (dj.overrides != "") { + supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`; + } + + // Add http response check messages if they exists + if (httpResponseCheck === false) { + supportString += "\n**Failed HTTP Checks:**\n"; + // We use `innerText` here since that will convert
into new-lines + supportString += "\n```yaml\n" + document.getElementById("http-response-errors").innerText.trim() + "\n```\n"; + } + + // Add the current config in json form + supportString += "\n**Config:**\n"; + supportString += "\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n"; + + supportString += "\n
\n"; + + // Add the support string to the textbox so it can be viewed and copied document.getElementById("support-string").textContent = supportString; document.getElementById("support-string").classList.remove("d-none"); document.getElementById("copy-support").classList.remove("d-none"); @@ -199,6 +219,165 @@ function checkDns(dns_resolved) { } } +async function fetchCheckUrl(url) { + try { + const response = await fetch(url); + return { headers: response.headers, status: response.status, text: await response.text() }; + } catch (error) { + console.error(`Error fetching ${url}: ${error}`); + return { error }; + } +} + +function checkSecurityHeaders(headers, omit) { + let securityHeaders = { + "x-frame-options": ["SAMEORIGIN"], + "x-content-type-options": ["nosniff"], + "referrer-policy": ["same-origin"], + "x-xss-protection": ["0"], + "x-robots-tag": ["noindex", "nofollow"], + "cross-origin-resource-policy": ["same-origin"], + "content-security-policy": [ + "default-src 'none'", + "font-src 'self'", + "manifest-src 'self'", + "base-uri 'self'", + "form-action 'self'", + "object-src 'self' blob:", + "script-src 'self' 'wasm-unsafe-eval'", + "style-src 'self' 'unsafe-inline'", + "child-src 'self' https://*.duosecurity.com https://*.duofederal.com", + "frame-src 'self' https://*.duosecurity.com https://*.duofederal.com", + "frame-ancestors 'self' chrome-extension://nngceckbapebfimnlniiiahkandclblb chrome-extension://jbkfoedolllekgbhcbcoahefnbanhhlh moz-extension://*", + "img-src 'self' data: https://haveibeenpwned.com", + "connect-src 'self' https://api.pwnedpasswords.com https://api.2fa.directory https://app.simplelogin.io/api/ https://app.addy.io/api/ https://api.fastmail.com/ https://api.forwardemail.net", + ] + }; + + let messages = []; + for (let header in securityHeaders) { + // Skip some headers for specific endpoints if needed + if (typeof omit === "object" && omit.includes(header) === true) { + continue; + } + // If the header exists, check if the contents 
matches what we expect it to be + let headerValue = headers.get(header); + if (headerValue !== null) { + securityHeaders[header].forEach((expectedValue) => { + if (headerValue.indexOf(expectedValue) === -1) { + messages.push(`'${header}' does not contain '${expectedValue}'`); + } + }); + } else { + messages.push(`'${header}' is missing!`); + } + } + return messages; +} + +async function checkHttpResponse() { + const [apiConfig, webauthnConnector, notFound, notFoundApi, badRequest, unauthorized, forbidden] = await Promise.all([ + fetchCheckUrl(`${BASE_URL}/api/config`), + fetchCheckUrl(`${BASE_URL}/webauthn-connector.html`), + fetchCheckUrl(`${BASE_URL}/admin/does-not-exist`), + fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=404`), + fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=400`), + fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=401`), + fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=403`), + ]); + + const respErrorElm = document.getElementById("http-response-errors"); + + // Check and validate the default API header responses + let apiErrors = checkSecurityHeaders(apiConfig.headers); + if (apiErrors.length >= 1) { + respErrorElm.innerHTML += "API calls:
"; + apiErrors.forEach((errMsg) => { + respErrorElm.innerHTML += `Header: ${errMsg}
`; + }); + } + + // Check the special `-connector.html` headers, these should have some headers omitted. + const omitConnectorHeaders = ["x-frame-options", "content-security-policy"]; + let connectorErrors = checkSecurityHeaders(webauthnConnector.headers, omitConnectorHeaders); + omitConnectorHeaders.forEach((header) => { + if (webauthnConnector.headers.get(header) !== null) { + connectorErrors.push(`'${header}' is present while it should not`); + } + }); + if (connectorErrors.length >= 1) { + respErrorElm.innerHTML += "2FA Connector calls:
"; + connectorErrors.forEach((errMsg) => { + respErrorElm.innerHTML += `Header: ${errMsg}
`; + }); + } + + // Check specific error code responses if they are not re-written by a reverse proxy + let responseErrors = []; + if (notFound.status !== 404 || notFound.text.indexOf("return to the web-vault") === -1) { + responseErrors.push("404 (Not Found) HTML is invalid"); + } + + if (notFoundApi.status !== 404 || notFoundApi.text.indexOf("\"message\":\"Testing error 404 response\",") === -1) { + responseErrors.push("404 (Not Found) JSON is invalid"); + } + + if (badRequest.status !== 400 || badRequest.text.indexOf("\"message\":\"Testing error 400 response\",") === -1) { + responseErrors.push("400 (Bad Request) is invalid"); + } + + if (unauthorized.status !== 401 || unauthorized.text.indexOf("\"message\":\"Testing error 401 response\",") === -1) { + responseErrors.push("401 (Unauthorized) is invalid"); + } + + if (forbidden.status !== 403 || forbidden.text.indexOf("\"message\":\"Testing error 403 response\",") === -1) { + responseErrors.push("403 (Forbidden) is invalid"); + } + + if (responseErrors.length >= 1) { + respErrorElm.innerHTML += "HTTP error responses:
"; + responseErrors.forEach((errMsg) => { + respErrorElm.innerHTML += `Response to: ${errMsg}
`; + }); + } + + if (responseErrors.length >= 1 || connectorErrors.length >= 1 || apiErrors.length >= 1) { + document.getElementById("http-response-warning").classList.remove("d-none"); + } else { + httpResponseCheck = true; + document.getElementById("http-response-success").classList.remove("d-none"); + } +} + +async function fetchWsUrl(wsUrl) { + return new Promise((resolve, reject) => { + try { + const ws = new WebSocket(wsUrl); + ws.onopen = () => { + ws.close(); + resolve(true); + }; + + ws.onerror = () => { + reject(false); + }; + } catch (_) { + reject(false); + } + }); +} + +async function checkWebsocketConnection() { + // Test Websocket connections via the anonymous (login with device) connection + const isConnected = await fetchWsUrl(`${BASE_URL}/notifications/anonymous-hub?token=admin-diagnostics`).catch(() => false); + if (isConnected) { + websocketCheck = true; + document.getElementById("websocket-success").classList.remove("d-none"); + } else { + document.getElementById("websocket-error").classList.remove("d-none"); + } +} + function init(dj) { // Time check document.getElementById("time-browser-string").textContent = browserUTC; @@ -225,6 +404,12 @@ function init(dj) { // DNS Check checkDns(dj.dns_resolved); + + checkHttpResponse(); + + if (dj.enable_websocket) { + checkWebsocketConnection(); + } } // onLoad events diff --git a/src/static/scripts/admin_users.js b/src/static/scripts/admin_users.js index c2462521..54fdedf2 100644 --- a/src/static/scripts/admin_users.js +++ b/src/static/scripts/admin_users.js @@ -152,7 +152,7 @@ const ORG_TYPES = { "name": "User", "bg": "blue" }, - "3": { + "4": { "name": "Manager", "bg": "green" }, diff --git a/src/static/scripts/datatables.css b/src/static/scripts/datatables.css index 878e2347..195caa84 100644 --- a/src/static/scripts/datatables.css +++ b/src/static/scripts/datatables.css @@ -4,10 +4,10 @@ * * To rebuild or modify this file with the latest versions of the included * software please visit: - * 
https://datatables.net/download/#bs5/dt-2.0.8 + * https://datatables.net/download/#bs5/dt-2.1.8 * * Included libraries: - * DataTables 2.0.8 + * DataTables 2.1.8 */ @charset "UTF-8"; @@ -45,15 +45,21 @@ table.dataTable tr.dt-hasChild td.dt-control:before { } html.dark table.dataTable td.dt-control:before, -:root[data-bs-theme=dark] table.dataTable td.dt-control:before { +:root[data-bs-theme=dark] table.dataTable td.dt-control:before, +:root[data-theme=dark] table.dataTable td.dt-control:before { border-left-color: rgba(255, 255, 255, 0.5); } html.dark table.dataTable tr.dt-hasChild td.dt-control:before, -:root[data-bs-theme=dark] table.dataTable tr.dt-hasChild td.dt-control:before { +:root[data-bs-theme=dark] table.dataTable tr.dt-hasChild td.dt-control:before, +:root[data-theme=dark] table.dataTable tr.dt-hasChild td.dt-control:before { border-top-color: rgba(255, 255, 255, 0.5); border-left-color: transparent; } +div.dt-scroll { + width: 100%; +} + div.dt-scroll-body thead tr, div.dt-scroll-body tfoot tr { height: 0; @@ -377,6 +383,31 @@ table.table.dataTable.table-hover > tbody > tr.selected:hover > * { box-shadow: inset 0 0 0 9999px rgba(var(--dt-row-selected), 0.975); } +div.dt-container div.dt-layout-start > *:not(:last-child) { + margin-right: 1em; +} +div.dt-container div.dt-layout-end > *:not(:first-child) { + margin-left: 1em; +} +div.dt-container div.dt-layout-full { + width: 100%; +} +div.dt-container div.dt-layout-full > *:only-child { + margin-left: auto; + margin-right: auto; +} +div.dt-container div.dt-layout-table > div { + display: block !important; +} + +@media screen and (max-width: 767px) { + div.dt-container div.dt-layout-start > *:not(:last-child) { + margin-right: 0; + } + div.dt-container div.dt-layout-end > *:not(:first-child) { + margin-left: 0; + } +} div.dt-container div.dt-length label { font-weight: normal; text-align: left; @@ -400,9 +431,6 @@ div.dt-container div.dt-search input { display: inline-block; width: auto; } 
-div.dt-container div.dt-info { - padding-top: 0.85em; -} div.dt-container div.dt-paging { margin: 0; } diff --git a/src/static/scripts/datatables.js b/src/static/scripts/datatables.js index 3d22cbde..d0361b54 100644 --- a/src/static/scripts/datatables.js +++ b/src/static/scripts/datatables.js @@ -4,20 +4,20 @@ * * To rebuild or modify this file with the latest versions of the included * software please visit: - * https://datatables.net/download/#bs5/dt-2.0.8 + * https://datatables.net/download/#bs5/dt-2.1.8 * * Included libraries: - * DataTables 2.0.8 + * DataTables 2.1.8 */ -/*! DataTables 2.0.8 +/*! DataTables 2.1.8 * © SpryMedia Ltd - datatables.net/license */ /** * @summary DataTables * @description Paginate, search and order HTML tables - * @version 2.0.8 + * @version 2.1.8 * @author SpryMedia Ltd * @contact www.datatables.net * @copyright SpryMedia Ltd. @@ -116,7 +116,6 @@ var i=0, iLen; var sId = this.getAttribute( 'id' ); - var bInitHandedOff = false; var defaults = DataTable.defaults; var $this = $(this); @@ -266,6 +265,8 @@ "rowId", "caption", "layout", + "orderDescReverse", + "typeDetect", [ "iCookieDuration", "iStateDuration" ], // backwards compat [ "oSearch", "oPreviousSearch" ], [ "aoSearchCols", "aoPreSearchCols" ], @@ -312,38 +313,14 @@ oSettings._iDisplayStart = oInit.iDisplayStart; } - /* Language definitions */ - var oLanguage = oSettings.oLanguage; - $.extend( true, oLanguage, oInit.oLanguage ); - - if ( oLanguage.sUrl ) + var defer = oInit.iDeferLoading; + if ( defer !== null ) { - /* Get the language definitions from a file - because this Ajax call makes the language - * get async to the remainder of this function we use bInitHandedOff to indicate that - * _fnInitialise will be fired by the returned Ajax handler, rather than the constructor - */ - $.ajax( { - dataType: 'json', - url: oLanguage.sUrl, - success: function ( json ) { - _fnCamelToHungarian( defaults.oLanguage, json ); - $.extend( true, oLanguage, json, oSettings.oInit.oLanguage 
); + oSettings.deferLoading = true; - _fnCallbackFire( oSettings, null, 'i18n', [oSettings], true); - _fnInitialise( oSettings ); - }, - error: function () { - // Error occurred loading language file - _fnLog( oSettings, 0, 'i18n file loading error', 21 ); - - // continue on as best we can - _fnInitialise( oSettings ); - } - } ); - bInitHandedOff = true; - } - else { - _fnCallbackFire( oSettings, null, 'i18n', [oSettings]); + var tmp = Array.isArray(defer); + oSettings._iRecordsDisplay = tmp ? defer[0] : defer; + oSettings._iRecordsTotal = tmp ? defer[1] : defer; } /* @@ -410,113 +387,112 @@ } ); } - var features = oSettings.oFeatures; - var loadedInit = function () { - /* - * Sorting - * @todo For modularisation (1.11) this needs to do into a sort start up handler - */ - - // If aaSorting is not defined, then we use the first indicator in asSorting - // in case that has been altered, so the default sort reflects that option - if ( oInit.aaSorting === undefined ) { - var sorting = oSettings.aaSorting; - for ( i=0, iLen=sorting.length ; i').appendTo( $this ); - } - - caption.html( oSettings.caption ); - } - - // Store the caption side, so we can remove the element from the document - // when creating the element - if (caption.length) { - caption[0]._captionSide = caption.css('caption-side'); - oSettings.captionNode = caption[0]; - } - - if ( thead.length === 0 ) { - thead = $('').appendTo($this); - } - oSettings.nTHead = thead[0]; - $('tr', thead).addClass(oClasses.thead.row); - - var tbody = $this.children('tbody'); - if ( tbody.length === 0 ) { - tbody = $('').insertAfter(thead); - } - oSettings.nTBody = tbody[0]; - - var tfoot = $this.children('tfoot'); - if ( tfoot.length === 0 ) { - // If we are a scrolling table, and no footer has been given, then we need to create - // a tfoot element for the caption element to be appended to - tfoot = $('').appendTo($this); - } - oSettings.nTFoot = tfoot[0]; - $('tr', tfoot).addClass(oClasses.tfoot.row); - - // Check if 
there is data passing into the constructor - if ( oInit.aaData ) { - for ( i=0 ; i').appendTo( $this ); + } + + caption.html( oSettings.caption ); + } + + // Store the caption side, so we can remove the element from the document + // when creating the element + if (caption.length) { + caption[0]._captionSide = caption.css('caption-side'); + oSettings.captionNode = caption[0]; + } + + if ( thead.length === 0 ) { + thead = $('').appendTo($this); + } + oSettings.nTHead = thead[0]; + $('tr', thead).addClass(oClasses.thead.row); + + var tbody = $this.children('tbody'); + if ( tbody.length === 0 ) { + tbody = $('').insertAfter(thead); + } + oSettings.nTBody = tbody[0]; + + var tfoot = $this.children('tfoot'); + if ( tfoot.length === 0 ) { + // If we are a scrolling table, and no footer has been given, then we need to create + // a tfoot element for the caption element to be appended to + tfoot = $('').appendTo($this); + } + oSettings.nTFoot = tfoot[0]; + $('tr', tfoot).addClass(oClasses.tfoot.row); + + // Copy the data index array + oSettings.aiDisplay = oSettings.aiDisplayMaster.slice(); + + // Initialisation complete - table can be drawn + oSettings.bInitialised = true; + + // Language definitions + var oLanguage = oSettings.oLanguage; + $.extend( true, oLanguage, oInit.oLanguage ); + + if ( oLanguage.sUrl ) { + // Get the language definitions from a file + $.ajax( { + dataType: 'json', + url: oLanguage.sUrl, + success: function ( json ) { + _fnCamelToHungarian( defaults.oLanguage, json ); + $.extend( true, oLanguage, json, oSettings.oInit.oLanguage ); + + _fnCallbackFire( oSettings, null, 'i18n', [oSettings], true); + _fnInitialise( oSettings ); + }, + error: function () { + // Error occurred loading language file + _fnLog( oSettings, 0, 'i18n file loading error', 21 ); + + // Continue on as best we can + _fnInitialise( oSettings ); + } + } ); + } + else { + _fnCallbackFire( oSettings, null, 'i18n', [oSettings], true); + _fnInitialise( oSettings ); + } } ); _that = 
null; return this; @@ -563,7 +539,7 @@ * * @type string */ - builder: "bs5/dt-2.0.8", + builder: "bs5/dt-2.1.8", /** @@ -1033,6 +1009,15 @@ info: { container: 'dt-info' }, + layout: { + row: 'dt-layout-row', + cell: 'dt-layout-cell', + tableRow: 'dt-layout-table', + tableCell: '', + start: 'dt-layout-start', + end: 'dt-layout-end', + full: 'dt-layout-full' + }, length: { container: 'dt-length', select: 'dt-input' @@ -1081,7 +1066,8 @@ active: 'current', button: 'dt-paging-button', container: 'dt-paging', - disabled: 'disabled' + disabled: 'disabled', + nav: '' } } ); @@ -1156,7 +1142,7 @@ }; - var _isNumber = function ( d, decimalPoint, formatted ) { + var _isNumber = function ( d, decimalPoint, formatted, allowEmpty ) { var type = typeof d; var strType = type === 'string'; @@ -1167,7 +1153,7 @@ // If empty return immediately so there must be a number if it is a // formatted string (this stops the string "k", or "kr", etc being detected // as a formatted number for currency - if ( _empty( d ) ) { + if ( allowEmpty && _empty( d ) ) { return true; } @@ -1189,8 +1175,8 @@ }; // Is a string a number surrounded by HTML? - var _htmlNumeric = function ( d, decimalPoint, formatted ) { - if ( _empty( d ) ) { + var _htmlNumeric = function ( d, decimalPoint, formatted, allowEmpty ) { + if ( allowEmpty && _empty( d ) ) { return true; } @@ -1202,7 +1188,7 @@ var html = _isHtml( d ); return ! html ? null : - _isNumber( _stripHtml( d ), decimalPoint, formatted ) ? + _isNumber( _stripHtml( d ), decimalPoint, formatted, allowEmpty ) ? true : null; }; @@ -1244,7 +1230,7 @@ // is essential here if ( prop2 !== undefined ) { for ( ; i _max_str_len) { throw new Error('Exceeded max str len'); @@ -1340,8 +1330,11 @@ } // It is faster to just run `normalize` than it is to check if - // we need to with a regex! - var res = str.normalize("NFD"); + // we need to with a regex! (Check as it isn't available in old + // Safari) + var res = str.normalize + ? 
str.normalize("NFD") + : str; // Equally, here we check if a regex is needed or not return res.length !== str.length @@ -2264,6 +2257,21 @@ return a; } + /** + * Allow the result from a type detection function to be `true` while + * translating that into a string. Old type detection functions will + * return the type name if it passes. An obect store would be better, + * but not backwards compatible. + * + * @param {*} typeDetect Object or function for type detection + * @param {*} res Result from the type detection function + * @returns Type name or false + */ + function _typeResult (typeDetect, res) { + return res === true + ? typeDetect._name + : res; + } /** * Calculate the 'type' of a column @@ -2278,7 +2286,7 @@ var i, ien, j, jen, k, ken; var col, detectedType, cache; - // For each column, spin over the + // For each column, spin over the data type detection functions, seeing if one matches for ( i=0, ien=columns.length ; i col is set to and correct if needed - for (var i=0 ; i col is set to and correct if needed + for (var i=0 ; i 0 ? idx : null; + } + + // `:visible` on its own + return idx; } ); case 'name': @@ -9215,23 +9404,60 @@ } ); /** - * Set the jQuery or window object to be used by DataTables - * - * @param {*} module Library / container object - * @param {string} [type] Library or container type `lib`, `win` or `datetime`. - * If not provided, automatic detection is attempted. + * Set the libraries that DataTables uses, or the global objects. + * Note that the arguments can be either way around (legacy support) + * and the second is optional. See docs. */ - DataTable.use = function (module, type) { - if (type === 'lib' || module.fn) { + DataTable.use = function (arg1, arg2) { + // Reverse arguments for legacy support + var module = typeof arg1 === 'string' + ? arg2 + : arg1; + var type = typeof arg2 === 'string' + ? 
arg2 + : arg1; + + // Getter + if (module === undefined && typeof type === 'string') { + switch (type) { + case 'lib': + case 'jq': + return $; + + case 'win': + return window; + + case 'datetime': + return DataTable.DateTime; + + case 'luxon': + return __luxon; + + case 'moment': + return __moment; + + default: + return null; + } + } + + // Setter + if (type === 'lib' || type === 'jq' || (module && module.fn && module.fn.jquery)) { $ = module; } - else if (type == 'win' || module.document) { + else if (type == 'win' || (module && module.document)) { window = module; document = module.document; } - else if (type === 'datetime' || module.type === 'DateTime') { + else if (type === 'datetime' || (module && module.type === 'DateTime')) { DataTable.DateTime = module; } + else if (type === 'luxon' || (module && module.FixedOffsetZone)) { + __luxon = module; + } + else if (type === 'moment' || (module && module.isMoment)) { + __moment = module; + } } /** @@ -9487,7 +9713,7 @@ fn.call(this); } else { - this.on('init', function () { + this.on('init.dt.DT', function () { fn.call(this); }); } @@ -9640,7 +9866,7 @@ * @type string * @default Version number */ - DataTable.version = "2.0.8"; + DataTable.version = "2.1.8"; /** * Private data store, containing all of the settings objects that are @@ -10485,7 +10711,8 @@ first: 'First', last: 'Last', next: 'Next', - previous: 'Previous' + previous: 'Previous', + number: '' } }, @@ -10665,6 +10892,10 @@ }, + /** The initial data order is reversed when `desc` ordering */ + orderDescReverse: true, + + /** * This parameter allows you to have define the global filtering state at * initialisation time. 
As an object the `search` parameter must be @@ -10713,7 +10944,7 @@ * * `full_numbers` - 'First', 'Previous', 'Next' and 'Last' buttons, plus page numbers * * `first_last_numbers` - 'First' and 'Last' buttons, plus page numbers */ - "sPaginationType": "full_numbers", + "sPaginationType": "", /** @@ -10783,7 +11014,13 @@ /** * Caption value */ - "caption": null + "caption": null, + + + /** + * For server-side processing - use the data from the DOM for the first draw + */ + iDeferLoading: null }; _fnHungarianMap( DataTable.defaults ); @@ -11726,7 +11963,13 @@ captionNode: null, - colgroup: null + colgroup: null, + + /** Delay loading of data */ + deferLoading: null, + + /** Allow auto type detection */ + typeDetect: true }; /** @@ -11750,7 +11993,7 @@ }, full: function () { - return [ 'first', 'previous', 'next', 'last' ]; + return [ 'first', 'previous', 'next', 'last' ]; }, numbers: function () { @@ -11764,11 +12007,11 @@ full_numbers: function () { return [ 'first', 'previous', 'numbers', 'next', 'last' ]; }, - + first_last: function () { return ['first', 'last']; }, - + first_last_numbers: function () { return ['first', 'numbers', 'last']; }, @@ -11850,38 +12093,56 @@ * to make working with DataTables a little bit easier. */ - function __mldFnName(name) { - return name.replace(/[\W]/g, '_') - } - - // Common logic for moment, luxon or a date action - function __mld( dt, momentFn, luxonFn, dateFn, arg1 ) { - if (window.moment) { - return dt[momentFn]( arg1 ); + /** + * Common logic for moment, luxon or a date action. + * + * Happens after __mldObj, so don't need to call `resolveWindowsLibs` again + */ + function __mld( dtLib, momentFn, luxonFn, dateFn, arg1 ) { + if (__moment) { + return dtLib[momentFn]( arg1 ); } - else if (window.luxon) { - return dt[luxonFn]( arg1 ); + else if (__luxon) { + return dtLib[luxonFn]( arg1 ); } - return dateFn ? dt[dateFn]( arg1 ) : dt; + return dateFn ? 
dtLib[dateFn]( arg1 ) : dtLib; } var __mlWarning = false; + var __luxon; // Can be assigned in DateTeble.use() + var __moment; // Can be assigned in DateTeble.use() + + /** + * + */ + function resolveWindowLibs() { + if (window.luxon && ! __luxon) { + __luxon = window.luxon; + } + + if (window.moment && ! __moment) { + __moment = window.moment; + } + } + function __mldObj (d, format, locale) { var dt; - if (window.moment) { - dt = window.moment.utc( d, format, locale, true ); + resolveWindowLibs(); + + if (__moment) { + dt = __moment.utc( d, format, locale, true ); if (! dt.isValid()) { return null; } } - else if (window.luxon) { + else if (__luxon) { dt = format && typeof d === 'string' - ? window.luxon.DateTime.fromFormat( d, format ) - : window.luxon.DateTime.fromISO( d ); + ? __luxon.DateTime.fromFormat( d, format ) + : __luxon.DateTime.fromISO( d ); if (! dt.isValid) { return null; @@ -11926,11 +12187,11 @@ from = null; } - var typeName = 'datetime' + (to ? '-' + __mldFnName(to) : ''); + var typeName = 'datetime' + (to ? '-' + to : ''); // Add type detection and sorting specific to this date format - we need to be able to identify // date type columns as such, rather than as numbers in extensions. Hence the need for this. - if (! DataTable.ext.type.order[typeName]) { + if (! DataTable.ext.type.order[typeName + '-pre']) { DataTable.type(typeName, { detect: function (d) { // The renderer will give the value to type detect as the type! @@ -12029,7 +12290,7 @@ // Formatted date time detection - use by declaring the formats you are going to use DataTable.datetime = function ( format, locale ) { - var typeName = 'datetime-detect-' + __mldFnName(format); + var typeName = 'datetime-' + format; if (! 
locale) { locale = 'en'; @@ -12169,7 +12430,7 @@ return { className: _extTypes.className[name], detect: _extTypes.detect.find(function (fn) { - return fn.name === name; + return fn._name === name; }), order: { pre: _extTypes.order[name + '-pre'], @@ -12184,27 +12445,20 @@ var setProp = function(prop, propVal) { _extTypes[prop][name] = propVal; }; - var setDetect = function (fn) { - // Wrap to allow the function to return `true` rather than - // specifying the type name. - var cb = function (d, s) { - var ret = fn(d, s); + var setDetect = function (detect) { + // `detect` can be a function or an object - we set a name + // property for either - that is used for the detection + Object.defineProperty(detect, "_name", {value: name}); - return ret === true - ? name - : ret; - }; - Object.defineProperty(cb, "name", {value: name}); - - var idx = _extTypes.detect.findIndex(function (fn) { - return fn.name === name; + var idx = _extTypes.detect.findIndex(function (item) { + return item._name === name; }); if (idx === -1) { - _extTypes.detect.unshift(cb); + _extTypes.detect.unshift(detect); } else { - _extTypes.detect.splice(idx, 1, cb); + _extTypes.detect.splice(idx, 1, detect); } }; var setOrder = function (obj) { @@ -12260,10 +12514,23 @@ // Get a list of types DataTable.types = function () { return _extTypes.detect.map(function (fn) { - return fn.name; + return fn._name; }); }; + var __diacriticSort = function (a, b) { + a = a !== null && a !== undefined ? a.toString().toLowerCase() : ''; + b = b !== null && b !== undefined ? 
b.toString().toLowerCase() : ''; + + // Checked for `navigator.languages` support in `oneOf` so this code can't execute in old + // Safari and thus can disable this check + // eslint-disable-next-line compat/compat + return a.localeCompare(b, navigator.languages[0] || navigator.language, { + numeric: true, + ignorePunctuation: true, + }); + } + // // Built in data types // @@ -12276,7 +12543,7 @@ pre: function ( a ) { // This is a little complex, but faster than always calling toString, // http://jsperf.com/tostring-v-check - return _empty(a) ? + return _empty(a) && typeof a !== 'boolean' ? '' : typeof a === 'string' ? a.toLowerCase() : @@ -12288,11 +12555,38 @@ search: _filterString(false, true) }); + DataTable.type('string-utf8', { + detect: { + allOf: function ( d ) { + return true; + }, + oneOf: function ( d ) { + // At least one data point must contain a non-ASCII character + // This line will also check if navigator.languages is supported or not. If not (Safari 10.0-) + // this data type won't be supported. + // eslint-disable-next-line compat/compat + return ! _empty( d ) && navigator.languages && typeof d === 'string' && d.match(/[^\x00-\x7F]/); + } + }, + order: { + asc: __diacriticSort, + desc: function (a, b) { + return __diacriticSort(a, b) * -1; + } + }, + search: _filterString(false, true) + }); + DataTable.type('html', { - detect: function ( d ) { - return _empty( d ) || (typeof d === 'string' && d.indexOf('<') !== -1) ? - 'html' : null; + detect: { + allOf: function ( d ) { + return _empty( d ) || (typeof d === 'string' && d.indexOf('<') !== -1); + }, + oneOf: function ( d ) { + // At least one data point must contain a `<` + return ! 
_empty( d ) && typeof d === 'string' && d.indexOf('<') !== -1; + } }, order: { pre: function ( a ) { @@ -12309,16 +12603,21 @@ DataTable.type('date', { className: 'dt-type-date', - detect: function ( d ) - { - // V8 tries _very_ hard to make a string passed into `Date.parse()` - // valid, so we need to use a regex to restrict date formats. Use a - // plug-in for anything other than ISO8601 style strings - if ( d && !(d instanceof Date) && ! _re_date.test(d) ) { - return null; + detect: { + allOf: function ( d ) { + // V8 tries _very_ hard to make a string passed into `Date.parse()` + // valid, so we need to use a regex to restrict date formats. Use a + // plug-in for anything other than ISO8601 style strings + if ( d && !(d instanceof Date) && ! _re_date.test(d) ) { + return null; + } + var parsed = Date.parse(d); + return (parsed !== null && !isNaN(parsed)) || _empty(d); + }, + oneOf: function ( d ) { + // At least one entry must be a date or a string with a date + return (d instanceof Date) || (typeof d === 'string' && _re_date.test(d)); } - var parsed = Date.parse(d); - return (parsed !== null && !isNaN(parsed)) || _empty(d) ? 'date' : null; }, order: { pre: function ( d ) { @@ -12331,10 +12630,16 @@ DataTable.type('html-num-fmt', { className: 'dt-type-numeric', - detect: function ( d, settings ) - { - var decimal = settings.oLanguage.sDecimal; - return _htmlNumeric( d, decimal, true ) ? 
'html-num-fmt' : null; + detect: { + allOf: function ( d, settings ) { + var decimal = settings.oLanguage.sDecimal; + return _htmlNumeric( d, decimal, true, false ); + }, + oneOf: function (d, settings) { + // At least one data point must contain a numeric value + var decimal = settings.oLanguage.sDecimal; + return _htmlNumeric( d, decimal, true, false ); + } }, order: { pre: function ( d, s ) { @@ -12348,10 +12653,16 @@ DataTable.type('html-num', { className: 'dt-type-numeric', - detect: function ( d, settings ) - { - var decimal = settings.oLanguage.sDecimal; - return _htmlNumeric( d, decimal ) ? 'html-num' : null; + detect: { + allOf: function ( d, settings ) { + var decimal = settings.oLanguage.sDecimal; + return _htmlNumeric( d, decimal, false, true ); + }, + oneOf: function (d, settings) { + // At least one data point must contain a numeric value + var decimal = settings.oLanguage.sDecimal; + return _htmlNumeric( d, decimal, false, false ); + } }, order: { pre: function ( d, s ) { @@ -12365,10 +12676,16 @@ DataTable.type('num-fmt', { className: 'dt-type-numeric', - detect: function ( d, settings ) - { - var decimal = settings.oLanguage.sDecimal; - return _isNumber( d, decimal, true ) ? 'num-fmt' : null; + detect: { + allOf: function ( d, settings ) { + var decimal = settings.oLanguage.sDecimal; + return _isNumber( d, decimal, true, true ); + }, + oneOf: function (d, settings) { + // At least one data point must contain a numeric value + var decimal = settings.oLanguage.sDecimal; + return _isNumber( d, decimal, true, false ); + } }, order: { pre: function ( d, s ) { @@ -12381,10 +12698,16 @@ DataTable.type('num', { className: 'dt-type-numeric', - detect: function ( d, settings ) - { - var decimal = settings.oLanguage.sDecimal; - return _isNumber( d, decimal ) ? 
'num' : null; + detect: { + allOf: function ( d, settings ) { + var decimal = settings.oLanguage.sDecimal; + return _isNumber( d, decimal, false, true ); + }, + oneOf: function (d, settings) { + // At least one data point must contain a numeric value + var decimal = settings.oLanguage.sDecimal; + return _isNumber( d, decimal, false, false ); + } }, order: { pre: function (d, s) { @@ -12468,11 +12791,18 @@ // `DT` namespace will allow the event to be removed automatically // on destroy, while the `dt` namespaced event is the one we are // listening for - $(settings.nTable).on( 'order.dt.DT', function ( e, ctx, sorting ) { + $(settings.nTable).on( 'order.dt.DT column-visibility.dt.DT', function ( e, ctx ) { if ( settings !== ctx ) { // need to check this this is the host return; // table, not a nested one } + var sorting = ctx.sortDetails; + + if (! sorting) { + return; + } + + var i; var orderClasses = classes.order; var columns = ctx.api.columns( cell ); var col = settings.aoColumns[columns.flatten()[0]]; @@ -12480,9 +12810,7 @@ var ariaType = ''; var indexes = columns.indexes(); var sortDirs = columns.orderable(true).flatten(); - var orderedColumns = ',' + sorting.map( function (val) { - return val.col; - } ).join(',') + ','; + var orderedColumns = _pluck(sorting, 'col'); cell .removeClass( @@ -12492,10 +12820,18 @@ .toggleClass( orderClasses.none, ! orderable ) .toggleClass( orderClasses.canAsc, orderable && sortDirs.includes('asc') ) .toggleClass( orderClasses.canDesc, orderable && sortDirs.includes('desc') ); - - var sortIdx = orderedColumns.indexOf( ',' + indexes.toArray().join(',') + ',' ); - if ( sortIdx !== -1 ) { + // Determine if all of the columns that this cell covers are included in the + // current ordering + var isOrdering = true; + + for (i=0; i') - .addClass('dt-layout-row') + .attr('id', items.id || null) + .addClass(items.className || classes.row) .appendTo( container ); $.each( items, function (key, val) { - var klass = ! val.table ? 
- 'dt-'+key+' ' : - ''; + if (key === 'id' || key === 'className') { + return; + } + + var klass = ''; if (val.table) { - row.addClass('dt-layout-table'); + row.addClass(classes.tableRow); + klass += classes.tableCell + ' '; + } + + if (key === 'start') { + klass += classes.start; + } + else if (key === 'end') { + klass += classes.end; + } + else { + klass += classes.full; } $('
') .attr({ id: val.id || null, - "class": 'dt-layout-cell '+klass+(val.className || '') + "class": val.className + ? val.className + : classes.cell + ' ' + klass }) .append( val.contents ) .appendTo( row ); @@ -12576,6 +12941,25 @@ } }; + function _divProp(el, prop, val) { + if (val) { + el[prop] = val; + } + } + + DataTable.feature.register( 'div', function ( settings, opts ) { + var n = $('
')[0]; + + if (opts) { + _divProp(n, 'className', opts.className); + _divProp(n, 'id', opts.id); + _divProp(n, 'innerHTML', opts.html); + _divProp(n, 'textContent', opts.text); + } + + return n; + } ); + DataTable.feature.register( 'info', function ( settings, opts ) { // For compatibility with the legacy `info` top level option if (! settings.oFeatures.bInfo) { @@ -12675,6 +13059,7 @@ opts = $.extend({ placeholder: language.sSearchPlaceholder, + processing: false, text: language.sSearch }, opts); @@ -12718,13 +13103,15 @@ /* Now do the filter */ if ( val != previousSearch.search ) { - previousSearch.search = val; - - _fnFilterComplete( settings, previousSearch ); - - // Need to redraw, without resorting - settings._iDisplayStart = 0; - _fnDraw( settings ); + _fnProcessingRun(settings, opts.processing, function () { + previousSearch.search = val; + + _fnFilterComplete( settings, previousSearch ); + + // Need to redraw, without resorting + settings._iDisplayStart = 0; + _fnDraw( settings ); + }); } }; @@ -12782,17 +13169,21 @@ opts = $.extend({ buttons: DataTable.ext.pager.numbers_length, type: settings.sPaginationType, - boundaryNumbers: true + boundaryNumbers: true, + firstLast: true, + previousNext: true, + numbers: true }, opts); - // To be removed in 2.1 - if (opts.numbers) { - opts.buttons = opts.numbers; - } - - var host = $('
').addClass( settings.oClasses.paging.container + ' paging_' + opts.type ); + var host = $('
') + .addClass(settings.oClasses.paging.container + (opts.type ? ' paging_' + opts.type : '')) + .append( + $('