diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 16704cd6fcc8..73ff39055e15 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -2,7 +2,7 @@
 # Overview
 #
 # Pattern used to match files follows most of the same rules as used in gitignore files. Order is
-# important; the last matching pattern takes precendence.
+# important; the last matching pattern takes precedence.
 #
 # For more info see:
 # https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
@@ -30,7 +30,6 @@
 # Core Clients (@keycloak/core-clients-maintainers)
 ###################################################################################################
 
-/js/libs/keycloak-js/ @keycloak/core-clients-maintainers
 
 ###################################################################################################
 # Cloud Native (@keycloak/cloud-native-maintainers)
@@ -47,7 +46,17 @@
 ###################################################################################################
 
 /themes/ @keycloak/ui-maintainers @keycloak/maintainers
-/js/ @keycloak/ui-maintainers
-/js/**/maven-resources-community/**/messages_*.properties @keycloak/ui-maintainers @keycloak/maintainers
-/adapters/oidc/js/ @keycloak/ui-maintainers
-/rest/admin-ui-ext/ @keycloak/ui-maintainers
+/js/ @keycloak/ui-maintainers @keycloak/maintainers
+/js/**/messages_*.properties @keycloak/ui-maintainers @keycloak/maintainers
+/adapters/oidc/js/ @keycloak/ui-maintainers @keycloak/maintainers
+/rest/admin-ui-ext/ @keycloak/ui-maintainers @keycloak/maintainers
+
+###################################################################################################
+# SRE (@keycloak/sre-maintainers)
+###################################################################################################
+
+/model/infinispan/ @keycloak/sre-maintainers @keycloak/maintainers
+/tests/clustering/ @keycloak/sre-maintainers @keycloak/maintainers
+/test-framework/clustering/ @keycloak/sre-maintainers @keycloak/maintainers
+/docs/guides/high-availability/ @keycloak/sre-maintainers @keycloak/maintainers
+/docs/guides/observability/ @keycloak/sre-maintainers @keycloak/maintainers
\ No newline at end of file
diff --git a/.github/actions/archive-surefire-reports/action.yml b/.github/actions/archive-surefire-reports/action.yml
index 91b1c525477a..ddc0acb89d94 100644
--- a/.github/actions/archive-surefire-reports/action.yml
+++ b/.github/actions/archive-surefire-reports/action.yml
@@ -37,7 +37,7 @@ runs:
     - id: upload-surefire-linux
       name: Upload Surefire reports
       if: (!cancelled() && contains(fromJSON(inputs.release-branches), github.ref) && contains(fromJSON('["push", "workflow_dispatch"]'), github.event_name))
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
       with:
         name: surefire-${{ inputs.job-id }}
         path: |
diff --git a/.github/actions/build-keycloak/action.yml b/.github/actions/build-keycloak/action.yml
index 0e59b1b0393f..481a1f34537d 100644
--- a/.github/actions/build-keycloak/action.yml
+++ b/.github/actions/build-keycloak/action.yml
@@ -49,7 +49,7 @@ runs:
     - id: upload-keycloak-maven-repository
       name: Upload Keycloak Maven artifacts
       if: inputs.upload-m2-repo == 'true'
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
       with:
         name: m2-keycloak.tzts
         path: m2-keycloak.tzts
@@ -58,7 +58,7 @@ runs:
     - id: upload-keycloak-dist
       name: Upload Keycloak dist
       if: inputs.upload-dist == 'true'
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
       with:
         name: keycloak-dist
         path: quarkus/dist/target/keycloak*.tar.gz
diff --git a/.github/actions/conditional/conditions b/.github/actions/conditional/conditions
index a75129ca99fc..88e1b2b7b6d4 100644
--- a/.github/actions/conditional/conditions
+++ b/.github/actions/conditional/conditions
@@ -3,6 +3,7 @@
 # To test a pattern run '.github/actions/conditional/conditional.sh '
 
 .github/actions/ ci ci-quarkus ci-store ci-sssd operator js codeql-java codeql-javascript codeql-typescript guides documentation
+.github/fake_fips/ ci
 .github/scripts/ ci ci-quarkus ci-sssd
 .github/scripts/ansible/ ci-store
 .github/scripts/aws/ ci-store
@@ -35,9 +36,9 @@
 docs/documentation/ documentation
 js/ js
 rest/admin-ui-ext/ js
 services/ js
+themes/ js
 js/apps/account-ui/ ci ci-webauthn
 js/libs/ui-shared/ ci ci-webauthn
-js/libs/keycloak-js/ ci ci-quarkus
 # The sections below contain a sub-set of files existing in the project which are supported languages by CodeQL.
 # See: https://codeql.github.com/docs/codeql-overview/supported-languages-and-frameworks/
diff --git a/.github/actions/cypress-cache/action.yml b/.github/actions/cypress-cache/action.yml
deleted file mode 100644
index 994192cfb0d3..000000000000
--- a/.github/actions/cypress-cache/action.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: Cache Cypress
-description: Caches Cypress binary to speed up the build.
-
-runs:
-  using: composite
-  steps:
-    - id: cache-key
-      name: Cache key based on Cypress version
-      shell: bash
-      run: echo "key=cypress-binary-$(jq -r '.devDependencies.cypress' js/apps/admin-ui/package.json)" >> $GITHUB_OUTPUT
-
-    - uses: actions/cache@v4
-      name: Cache Cypress binary
-      with:
-        # See: https://docs.cypress.io/app/references/advanced-installation#Binary-cache
-        path: |
-          ~/.cache/Cypress
-          /AppData/Local/Cypress/Cache
-          ~/Library/Caches/Cypress
-        key: ${{ runner.os }}-${{ steps.cache-key.outputs.key }}
diff --git a/.github/actions/install-chrome/action.yml b/.github/actions/install-chrome/action.yml
new file mode 100644
index 000000000000..c880cddf9345
--- /dev/null
+++ b/.github/actions/install-chrome/action.yml
@@ -0,0 +1,39 @@
+name: Install Chrome browser and driver for Testing
+description: Download and install the compatible Chrome and Chromedriver
+
+inputs:
+  version:
+    description: The version of Chrome and Chromedriver to install. By default none is installed.
+    required: false
+    default: default # E.g. 135.0.7049.84 (fixed version), default (chrome provided by GHA box)
+
+runs:
+  using: composite
+  steps:
+
+    - id: install-chrome
+      name: Install Chrome
+      if: inputs.version != 'default'
+      shell: bash
+      run: |
+        sudo apt-get remove google-chrome-stable
+        wget http://dl.google.com/linux/chrome/deb/pool/main/g/google-chrome-stable/google-chrome-stable_${{ inputs.version }}-1_amd64.deb -O /tmp/google-chrome-stable.deb --no-verbose
+        sudo apt-get install -y /tmp/google-chrome-stable.deb
+
+    - id: install-chromedriver
+      name: Install Chromedriver
+      if: inputs.version != 'default'
+      shell: bash
+      run: |
+        wget https://storage.googleapis.com/chrome-for-testing-public/${{ inputs.version }}/linux64/chromedriver-linux64.zip -O /tmp/chromedriver.zip --no-verbose
+        unzip -j /tmp/chromedriver.zip -d /tmp
+        sudo mv -f /tmp/chromedriver $CHROMEWEBDRIVER/chromedriver
+        sudo chmod +x $CHROMEWEBDRIVER/chromedriver
+
+    - id: show-version
+      name: Show Version
+      if: inputs.version == 'default'
+      shell: bash
+      run: |
+        google-chrome --version
+        $CHROMEWEBDRIVER/chromedriver --version
diff --git a/.github/actions/integration-test-setup/action.yml b/.github/actions/integration-test-setup/action.yml
index 4ec5a39f9ccf..1a4a15218303 100644
--- a/.github/actions/integration-test-setup/action.yml
+++ b/.github/actions/integration-test-setup/action.yml
@@ -14,6 +14,10 @@ inputs:
 runs:
   using: composite
   steps:
+    - id: update-hosts
+      name: Update /etc/hosts
+      uses: ./.github/actions/update-hosts
+
     - id: setup-java
       name: Setup Java
       uses: ./.github/actions/java-setup
@@ -31,7 +35,7 @@ runs:
 
     - id: download-keycloak
       name: Download Keycloak Maven artifacts
-      uses: actions/download-artifact@v4
+      uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
       with:
         name: m2-keycloak.tzts
 
diff --git a/.github/actions/java-setup/action.yml b/.github/actions/java-setup/action.yml
index 4b1013ec9d8c..8f7f43e3c045 100644
--- a/.github/actions/java-setup/action.yml
+++ b/.github/actions/java-setup/action.yml
@@ -16,7 +16,7 @@ runs:
   steps:
     - id: setup-java
       name: Setup Java
-      uses: actions/setup-java@v4
+      uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
       with:
         distribution: ${{ inputs.distribution }}
         java-version: ${{ inputs.java-version }}
diff --git a/.github/actions/maven-cache/action.yml b/.github/actions/maven-cache/action.yml
index fbfbfb2d3a72..f5a9a2873d34 100644
--- a/.github/actions/maven-cache/action.yml
+++ b/.github/actions/maven-cache/action.yml
@@ -19,7 +19,7 @@ runs:
 
     - id: cache-maven-repository
      name: Maven cache
-      uses: actions/cache@v4
+      uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
       if: inputs.create-cache-if-it-doesnt-exist == 'true'
       with:
         # Two asterisks are needed to make the follow-up exclusion work
@@ -31,20 +31,9 @@ runs:
         # Enable cross-os archive use the cache on both Linux and Windows
         enableCrossOsArchive: true
 
-    - shell: powershell
-      name: Link the cached Maven repository to the OS-dependent location
-      if: inputs.create-cache-if-it-doesnt-exist == 'false' && runner.os == 'Windows'
-      # The cache restore in the next step uses the relative path which was valid on Linux and that is part of the archive it downloads.
-      # You'll see that path when you enable debugging for the GitHub workflow on Windows.
-      # On Windows, the .m2 folder is in different location, so move all the contents to the right folder here.
-      # Also, not using the C: drive will speed up the build, see https://github.com/actions/runner-images/issues/8755
-      run: |
-        mkdir -p ../../../.m2/repository
-        cmd /c mklink /d $HOME\.m2\repository D:\.m2\repository
-
     - id: restore-maven-repository
       name: Maven cache
-      uses: actions/cache/restore@v4
+      uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
       if: inputs.create-cache-if-it-doesnt-exist == 'false'
       with:
         # This needs to repeat the same path pattern as above to find the matching cache
@@ -54,6 +43,15 @@ runs:
         key: ${{ steps.weekly-cache-key.outputs.key }}
         enableCrossOsArchive: true
 
+    - shell: bash
+      name: Copy restored maven repo to home folder in Windows
+      if: (steps.cache-maven-repository.outputs.cache-hit == 'true' || steps.restore-maven-repository.outputs.cache-hit == 'true') && runner.os == 'Windows'
+      run: |
+        if [ -d ../../../.m2/repository ]; then
+          cp -r ../../../.m2/repository ~/.m2
+          rm -r ../../../.m2/repository
+        fi
+
     - id: node-cache
       name: Node cache
       uses: ./.github/actions/node-cache
diff --git a/.github/actions/node-cache/action.yml b/.github/actions/node-cache/action.yml
index 583a9a62c214..6c2421294cc4 100644
--- a/.github/actions/node-cache/action.yml
+++ b/.github/actions/node-cache/action.yml
@@ -12,7 +12,7 @@ runs:
         echo "pnpm=$(cat js/pom.xml | grep '' | cut -d '>' -f 2 | cut -d '<' -f 1 | cut -c 1-)" >> $GITHUB_OUTPUT
 
     # Downloading Node.js often fails due to network issues, therefore we cache the artifacts downloaded by the frontend plugin.
-    - uses: actions/cache@v4
+    - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
       id: cache-binaries
       name: Cache Node.js and PNPM binaries
       with:
diff --git a/.github/actions/pnpm-setup/action.yml b/.github/actions/pnpm-setup/action.yml
index 350298a91390..6eb1853a97cf 100644
--- a/.github/actions/pnpm-setup/action.yml
+++ b/.github/actions/pnpm-setup/action.yml
@@ -11,7 +11,7 @@ runs:
   using: composite
   steps:
     - name: Set up Node.js
-      uses: actions/setup-node@v4
+      uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
       with:
         node-version: ${{ inputs.node-version }}
         check-latest: true
@@ -23,17 +23,7 @@ runs:
     - name: PNPM store cache
       uses: ./.github/actions/pnpm-store-cache
 
-    - name: Cypress binary cache
-      uses: ./.github/actions/cypress-cache
-
     - name: Install dependencies
       shell: bash
       run: pnpm install --prefer-offline --frozen-lockfile
       working-directory: js
-
-    # This step is only needed to ensure that the Cypress binary is installed.
-    # If the binary was retrieved from the cache, this step is a no-op.
-    - name: Install Cypress dependencies
-      shell: bash
-      working-directory: js/apps/admin-ui
-      run: pnpm exec cypress install
diff --git a/.github/actions/pnpm-store-cache/action.yml b/.github/actions/pnpm-store-cache/action.yml
index 3929d4824ec8..374cfcac2be2 100644
--- a/.github/actions/pnpm-store-cache/action.yml
+++ b/.github/actions/pnpm-store-cache/action.yml
@@ -9,7 +9,7 @@ runs:
       shell: bash
       run: echo "key=pnpm-store-`date -u "+%Y-%U"`" >> $GITHUB_OUTPUT
 
-    - uses: actions/cache@v4
+    - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
       name: Cache PNPM store
       with:
         # See: https://pnpm.io/npmrc#store-dir
diff --git a/.github/actions/update-hosts/action.yml b/.github/actions/update-hosts/action.yml
new file mode 100644
index 000000000000..d2c7f4289719
--- /dev/null
+++ b/.github/actions/update-hosts/action.yml
@@ -0,0 +1,21 @@
+name: Update /etc/hosts
+description: Update /etc/hosts file to hardcode known nip.io hostnames.
+  This is to avoid test instability due to DNS resolution issues.
+runs:
+  using: composite
+  steps:
+
+    - id: update-hosts-linux
+      name: Update /etc/hosts
+      if: runner.os == 'Linux'
+      shell: bash
+      run: |
+        printf "\n\n$(cat .github/actions/update-hosts/nipio-hosts)" | sudo tee -a /etc/hosts
+
+    - id: update-hosts-windows
+      name: Update C:\Windows\System32\drivers\etc\hosts
+      if: runner.os == 'Windows'
+      shell: powershell
+      run: |
+        "`n`n" | Add-Content C:\Windows\System32\drivers\etc\hosts
+        Get-Content .github/actions/update-hosts/nipio-hosts | Add-Content C:\Windows\System32\drivers\etc\hosts
\ No newline at end of file
diff --git a/.github/actions/update-hosts/nipio-hosts b/.github/actions/update-hosts/nipio-hosts
new file mode 100644
index 000000000000..d7f1a601cf0f
--- /dev/null
+++ b/.github/actions/update-hosts/nipio-hosts
@@ -0,0 +1,2 @@
+127.0.0.1 localtest.me 127.0.0.1.nip.io admin.127.0.0.1.nip.io localhost-myapp.127.0.0.1.nip.io localhost-sso.127.0.0.1.nip.io realmFrontend.127.0.0.1.nip.io proxy.kc.127.0.0.1.nip.io
+::1 localtest.me 127.0.0.1.nip.io admin.127.0.0.1.nip.io localhost-myapp.127.0.0.1.nip.io localhost-sso.127.0.0.1.nip.io realmFrontend.127.0.0.1.nip.io proxy.kc.127.0.0.1.nip.io
\ No newline at end of file
diff --git a/.github/actions/upload-flaky-tests/action.yml b/.github/actions/upload-flaky-tests/action.yml
index d76580b8e78b..b69de078072e 100644
--- a/.github/actions/upload-flaky-tests/action.yml
+++ b/.github/actions/upload-flaky-tests/action.yml
@@ -47,9 +47,9 @@ runs:
           echo "EOF" >> $GITHUB_OUTPUT
         fi
 
-    - uses: actions/upload-artifact@v4
+    - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
       if: ${{ steps.flaky-tests.outputs.flakes }}
       with:
         name: flaky-tests-${{ github.job }}-${{ join(matrix.*, '-') }}
         path: ${{ steps.flaky-tests.outputs.flakes }}
-        if-no-files-found: error
\ No newline at end of file
+        if-no-files-found: error
diff --git a/.github/actions/upload-heapdumps/action.yml b/.github/actions/upload-heapdumps/action.yml
index 65c2dc54c63f..c305f3d42c05 100644
--- a/.github/actions/upload-heapdumps/action.yml
+++ b/.github/actions/upload-heapdumps/action.yml
@@ -8,7 +8,7 @@ runs:
       name: Upload JVM Heapdumps
       # Windows runners are running into https://github.com/actions/upload-artifact/issues/240
       if: runner.os != 'Windows'
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
       with:
         name: jvm-heap-dumps
         path: |
diff --git a/.github/fake_fips/fake_fips.c b/.github/fake_fips/fake_fips.c
index 5c0b38c39420..6ea9dda2a777 100644
--- a/.github/fake_fips/fake_fips.c
+++ b/.github/fake_fips/fake_fips.c
@@ -34,7 +34,9 @@ static struct ctl_table crypto_sysctl_table[] = {
         .mode = 0444,
         .proc_handler = proc_dointvec
     },
-    {}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 11, 0))
+    {}
+#endif
 };
 static struct ctl_table crypto_dir_table[] = {
     {
@@ -44,7 +46,9 @@ static struct ctl_table crypto_dir_table[] = {
         .child = crypto_sysctl_table
 #endif
     },
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 11, 0))
     {}
+#endif
 };
 
 static struct ctl_table_header *crypto_sysctls;
diff --git a/.github/mvn-rel-settings.xml b/.github/mvn-rel-settings.xml
deleted file mode 100644
index 79949b291e93..000000000000
--- a/.github/mvn-rel-settings.xml
+++ /dev/null
@@ -1,37 +0,0 @@
- - - keycloak-rel - - - - keycloak-rel - - ${env.MAVEN_ID} - ${env.MAVEN_URL} - - true - - true - - - - - - ${env.MAVEN_ID} - ${env.MAVEN_USERNAME} - ${env.MAVEN_PASSWORD} - - gpg.passphrase -
${env.MAVEN_GPG_PASSPHRASE} - - - - diff --git a/.github/scripts/aws/rds/aurora_create.sh b/.github/scripts/aws/rds/aurora_create.sh index 08763f187a56..d24975254b87 100755 --- a/.github/scripts/aws/rds/aurora_create.sh +++ b/.github/scripts/aws/rds/aurora_create.sh @@ -116,6 +116,7 @@ aws rds create-db-cluster \ # For now only two AZs in each region are supported due to the two subnets created above for i in $( seq ${AURORA_INSTANCES} ); do aws rds create-db-instance \ + --no-auto-minor-version-upgrade \ --db-cluster-identifier ${AURORA_CLUSTER} \ --db-instance-identifier "${AURORA_CLUSTER}-instance-${i}" \ --db-instance-class ${AURORA_INSTANCE_CLASS} \ diff --git a/.github/scripts/pr-find-issues-test.sh b/.github/scripts/pr-find-issues-test.sh index 705d8adcc4a7..03de462421b9 100755 --- a/.github/scripts/pr-find-issues-test.sh +++ b/.github/scripts/pr-find-issues-test.sh @@ -5,7 +5,7 @@ source ./pr-find-issues.sh function testParsing() { echo -n "$1 -> $2 " - if [ $(parse_issues "$1") != "$2" ]; then + if [ "$(parse_issues "$1")" != "$2" ]; then echo "(failure)" return 1 fi @@ -22,3 +22,4 @@ trap 'testFailed' ERR testParsing "Closes #123" "123" testParsing "Fixes #123" "123" testParsing "Fixes: #123" "123" +testParsing "Fixes https://github.com/keycloak/keycloak/issues/123" "123" \ No newline at end of file diff --git a/.github/scripts/pr-find-issues.sh b/.github/scripts/pr-find-issues.sh index c91ccd85dcea..e9bd720ea29b 100755 --- a/.github/scripts/pr-find-issues.sh +++ b/.github/scripts/pr-find-issues.sh @@ -8,7 +8,11 @@ if [ "$REPO" == "" ]; then fi function parse_issues() { - echo "$1" | grep -i -P -o "(close|closes|closed|resolve|resolves|resolved|fixes|fixed):? #[[:digit:]]*" | cut -d '#' -f 2 | sort -n + echo "$1" | \ + grep -i -P -o "(close|closes|closed|resolve|resolves|resolved|fixes|fixed):? 
(#|https://github.com/keycloak/keycloak/issues/)[[:digit:]]*" | \ + sed -e 's|https://github.com/keycloak/keycloak/issues/|#|g' | \ + sed -e 's|keycloak/keycloak/issues/|#|g' | \ + cut -d '#' -f 2 | sort -n } if [ "$PR" != "" ]; then diff --git a/.github/scripts/prepare-quarkus-next.sh b/.github/scripts/prepare-quarkus-next.sh index 559e56ae00c5..60d6c1acd5d8 100755 --- a/.github/scripts/prepare-quarkus-next.sh +++ b/.github/scripts/prepare-quarkus-next.sh @@ -7,7 +7,7 @@ add_repository() { local id="sonatype-snapshots" local name="Sonatype Snapshots" - local url="https://s01.oss.sonatype.org/content/repositories/snapshots/" + local url="https://central.sonatype.com/repository/maven-snapshots/" # Decide the tag based on the element local tag diff --git a/.github/scripts/run-fips-it.sh b/.github/scripts/run-fips-it.sh index 48c46c969448..64b40498568c 100755 --- a/.github/scripts/run-fips-it.sh +++ b/.github/scripts/run-fips-it.sh @@ -1,5 +1,6 @@ #!/bin/bash -x +rm -f /etc/system-fips dnf install -y java-21-openjdk-devel fips-mode-setup --enable --no-bootcfg fips-mode-setup --is-enabled diff --git a/.github/scripts/run-fips-ut.sh b/.github/scripts/run-fips-ut.sh index c2ca47405fd4..0ebcb002a236 100755 --- a/.github/scripts/run-fips-ut.sh +++ b/.github/scripts/run-fips-ut.sh @@ -1,5 +1,6 @@ #!/bin/bash +rm -f /etc/system-fips dnf install -y java-21-openjdk-devel crypto-policies-scripts fips-mode-setup --enable --no-bootcfg fips-mode-setup --is-enabled diff --git a/.github/scripts/version-compatibility.sh b/.github/scripts/version-compatibility.sh new file mode 100755 index 000000000000..fa42ccf1a9b9 --- /dev/null +++ b/.github/scripts/version-compatibility.sh @@ -0,0 +1,25 @@ +#!/bin/bash -e + +if [[ "$RUNNER_DEBUG" == "1" ]]; then + set -x +fi + +TARGET_BRANCH="$1" +REPO="${2:-keycloak}" +ORG="${3:-keycloak}" + +if [[ "${TARGET_BRANCH}" != "release/"* ]]; then + exit 0 +fi + +ALL_RELEASES=$(gh release list \ + --repo "${ORG}/${REPO}" \ + --exclude-drafts \ + --exclude-pre-releases \ + --json name \ + --template '{{range .}}{{.name}}{{"\n"}}{{end}}' +) +MAJOR_MINOR=${TARGET_BRANCH#"release/"} +MAJOR_MINOR_RELEASES=$(echo "${ALL_RELEASES}" | grep "${MAJOR_MINOR}") + +echo "${MAJOR_MINOR_RELEASES}" | jq -cnR '[inputs] | map({version: .})' \ No newline at end of file diff --git a/.github/workflows/aurora-delete.yml b/.github/workflows/aurora-delete.yml index a2759dda6bc4..34fa4808ff5b 100644 --- a/.github/workflows/aurora-delete.yml +++ b/.github/workflows/aurora-delete.yml @@ -12,12 +12,15 @@ on: type: string required: true +permissions: + contents: read + jobs: delete: name: Delete Aurora DB runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Initialize AWS client run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 684eae6361bb..78152ad3215c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,6 +22,9 @@ defaults: run: shell: bash +permissions: + contents: read + jobs: conditional: @@ -34,9 +37,12 @@ jobs: ci-sssd: ${{ steps.conditional.outputs.ci-sssd }} ci-webauthn: ${{ steps.conditional.outputs.ci-webauthn }} ci-aurora: ${{ steps.auroradb-tests.outputs.run-aurora-tests }} - + ci-compatibility-matrix: ${{ steps.version-compatibility.outputs.matrix }} + permissions: + contents: read + pull-requests: read steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: conditional uses: 
./.github/actions/conditional @@ -52,13 +58,27 @@ jobs: fi echo "run-aurora-tests=$RUN_AURORADB_TESTS" >> $GITHUB_OUTPUT + - name: Version Compatibility Matrix + id: version-compatibility + env: + GH_TOKEN: ${{ github.token }} + run: | + if [[ "${{ github.event_name }}" == "pull_request" ]]; then + BRANCH="${{ github.base_ref }}" + else + BRANCH="${{ github.ref_name }}" + fi + MATRIX_JSON=$(./.github/scripts/version-compatibility.sh "${BRANCH}") + echo "${MATRIX_JSON}" + echo "matrix=${MATRIX_JSON}" >> $GITHUB_OUTPUT + build: name: Build if: needs.conditional.outputs.ci == 'true' runs-on: ubuntu-latest needs: conditional steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Build Keycloak uses: ./.github/actions/build-keycloak @@ -73,7 +93,7 @@ jobs: needs: build timeout-minutes: 30 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: unit-test-setup name: Unit test setup @@ -110,7 +130,7 @@ jobs: group: [1, 2, 3, 4, 5, 6] fail-fast: false steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -145,7 +165,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -186,7 +206,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -231,7 +251,7 @@ jobs: os: [ ubuntu-latest, windows-latest ] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 # We want to download Keycloak artifacts - id: integration-test-setup @@ -281,9 +301,9 @@ jobs: fail-fast: false runs-on: ${{ matrix.os }} env: - MAVEN_OPTS: -Xmx1024m + MAVEN_OPTS: -Xmx1536m steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -325,11 +345,11 @@ jobs: matrix: os: [ubuntu-latest, windows-latest] dist: [temurin] - version: [17] + version: [17, 24] fail-fast: false runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -341,6 +361,9 @@ jobs: - name: Prepare Quarkus distribution with current JDK run: ./mvnw install -e -pl testsuite/integration-arquillian/servers/auth-server/quarkus + - name: Run new base tests + run: ./mvnw package -f tests/pom.xml -Dtest=JDKTestSuite + - name: Run base tests run: | TESTS=`testsuite/integration-arquillian/tests/base/testsuites/suite.sh jdk` @@ -353,7 +376,7 @@ jobs: - name: Build with JDK run: - ./mvnw install -e -DskipTests -DskipExamples + ./mvnw install -e -DskipTests -DskipExamples -DskipProtoLock=true - name: Upload JVM Heapdumps if: always() @@ -378,7 +401,7 @@ jobs: timeout-minutes: 100 runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -414,7 +437,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 150 steps: - - uses: 
actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -445,7 +468,7 @@ jobs: - name: EC2 Maven Logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: store-it-mvn-logs path: .github/scripts/ansible/files @@ -461,7 +484,7 @@ jobs: variant: [ "clusterless,multi-site" ] fail-fast: false steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -496,8 +519,11 @@ jobs: if: needs.conditional.outputs.ci-aurora == 'true' runs-on: ubuntu-latest timeout-minutes: 150 + permissions: + contents: read + actions: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: node-cache name: Node cache @@ -551,7 +577,7 @@ jobs: pipx inject ansible-core boto3 botocore ./aws_ec2.sh create ${AWS_REGION} ${EC2_CLUSTER_NAME} ./keycloak_ec2_installer.sh ${AWS_REGION} ${EC2_CLUSTER_NAME} /tmp/keycloak.zip m2.tar.gz - ./mvn_ec2_runner.sh ${AWS_REGION} ${EC2_CLUSTER_NAME} "clean install -B -DskipTests -Pdistribution" + ./mvn_ec2_runner.sh ${AWS_REGION} ${EC2_CLUSTER_NAME} "clean install -B -DskipTests -Pdistribution -DskipProtoLock=true" ./mvn_ec2_runner.sh ${AWS_REGION} ${EC2_CLUSTER_NAME} "clean install -B -DskipTests -pl testsuite/integration-arquillian/servers/auth-server/quarkus -Pauth-server-quarkus -Pdb-aurora-postgres -Dmaven.build.cache.enabled=true" - name: Run Aurora migration tests on EC2 @@ -594,7 +620,7 @@ jobs: - name: EC2 Maven Logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: auroraDB-migration-tests-mvn-logs path: .github/scripts/ansible/files @@ -637,7 +663,7 @@ jobs: - name: EC2 Maven Logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: aurora-integration-tests-mvn-logs path: .github/scripts/ansible/files @@ -670,7 +696,7 @@ jobs: db: [postgres, mysql, oracle, mssql, mariadb] fail-fast: false steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -678,7 +704,7 @@ jobs: - name: Run new base tests run: | - KC_TEST_DATABASE=${{ matrix.db }} KC_TEST_DATABASE_REUSE=true TESTCONTAINERS_REUSE_ENABLE=true ./mvnw test -f tests/pom.xml -Dtest=DatabaseTestSuite + KC_TEST_DATABASE=${{ matrix.db }} KC_TEST_DATABASE_REUSE=true TESTCONTAINERS_REUSE_ENABLE=true ./mvnw package -f tests/pom.xml -Dtest=DatabaseTestSuite -Dkeycloak.distribution.start.timeout=360 - name: Database container port run: | @@ -696,6 +722,7 @@ jobs: -Ddocker.database.skip=true \ -Ddocker.database.port=$DATABASE_PORT \ -Ddocker.container.testdb.ip=localhost \ + -Dkeycloak.distribution.start.timeout=360 \ -pl testsuite/integration-arquillian/tests/base 2>&1 | misc/log/trimmer.sh - name: Run cluster JDBC_PING2 UDP smoke test @@ -750,7 +777,7 @@ jobs: if: needs.conditional.outputs.ci-store == 'true' timeout-minutes: 75 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -782,9 +809,9 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 35 env: - 
MAVEN_OPTS: -Xmx1024m + MAVEN_OPTS: -Xmx1536m steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -817,7 +844,11 @@ jobs: needs: build timeout-minutes: 20 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - id: unit-test-setup + name: Unit test setup + uses: ./.github/actions/unit-test-setup - name: Fake fips run: | @@ -825,10 +856,6 @@ jobs: make sudo insmod fake_fips.ko - - id: unit-test-setup - name: Unit test setup - uses: ./.github/actions/unit-test-setup - - name: Run crypto tests run: docker run --rm --workdir /github/workspace -v "${{ github.workspace }}":"/github/workspace" -v "$HOME/.m2":"/root/.m2" registry.access.redhat.com/ubi8/ubi:latest .github/scripts/run-fips-ut.sh @@ -852,13 +879,7 @@ jobs: mode: [non-strict, strict] fail-fast: false steps: - - uses: actions/checkout@v4 - - - name: Fake fips - run: | - cd .github/fake_fips - make - sudo insmod fake_fips.ko + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -866,6 +887,12 @@ jobs: with: jdk-version: 21 + - name: Fake fips + run: | + cd .github/fake_fips + make + sudo insmod fake_fips.ko + - name: Run base tests run: docker run --rm --workdir /github/workspace -e "SUREFIRE_RERUN_FAILING_COUNT" -v "${{ github.workspace }}":"/github/workspace" -v "$HOME/.m2":"/root/.m2" registry.access.redhat.com/ubi8/ubi:latest .github/scripts/run-fips-it.sh ${{ matrix.mode }} @@ -896,12 +923,15 @@ jobs: browser: [chrome, firefox] fail-fast: false steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup uses: ./.github/actions/integration-test-setup + - uses: ./.github/actions/install-chrome + if: matrix.browser == 'chrome' + - name: Run Forms IT run: | TESTS=`testsuite/integration-arquillian/tests/base/testsuites/suite.sh forms` @@ -938,12 +968,15 @@ jobs: - firefox fail-fast: false steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup uses: ./.github/actions/integration-test-setup + - uses: ./.github/actions/install-chrome + if: matrix.browser == 'chrome' + - name: Run WebAuthn IT run: | TESTS=`testsuite/integration-arquillian/tests/base/testsuites/suite.sh webauthn` @@ -977,7 +1010,7 @@ jobs: timeout-minutes: 30 steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -990,7 +1023,7 @@ jobs: - id: cache-maven-repository name: ipa-data cache - uses: actions/cache@v4 + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: path: ~/ipa-data.tar key: ${{ steps.weekly-cache-key.outputs.key }} @@ -1015,7 +1048,7 @@ jobs: database: [postgres, mysql, oracle, mssql, mariadb] fail-fast: false steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ -1057,7 +1090,7 @@ jobs: needs: build timeout-minutes: 30 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup @@ 
-1073,14 +1106,49 @@ jobs: - build timeout-minutes: 30 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: integration-test-setup name: Integration test setup uses: ./.github/actions/integration-test-setup + # This step is necessary because test/clustering requires building a new Keycloak image built from tar.gz + # file that is not part of m2-keycloak.tzts archive + - name: Build tar keycloak-quarkus-dist + run: ./mvnw package -pl quarkus/server/,quarkus/dist/ + - name: Run tests - run: ./mvnw test -f tests/pom.xml + run: ./mvnw package -f tests/pom.xml + + mixed-cluster-compatibility-tests: + name: Cluster Compatibility Tests + if: needs.conditional.outputs.ci-compatibility-matrix != '' + runs-on: ubuntu-latest + needs: + - build + - conditional + strategy: + fail-fast: false + matrix: + include: ${{ fromJSON(needs.conditional.outputs.ci-compatibility-matrix) }} + timeout-minutes: 10 + + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - id: integration-test-setup + name: Integration test setup + uses: ./.github/actions/integration-test-setup + + # This step is necessary because test/clustering requires building a new Keycloak image built from tar.gz + # file that is not part of m2-keycloak.tzts archive + - name: Build tar keycloak-quarkus-dist + run: ./mvnw package -pl quarkus/server/,quarkus/dist/ + + - name: Run tests + run: ./mvnw verify -pl tests/clustering + env: + KC_TEST_SERVER_IMAGES: "quay.io/keycloak/keycloak:${{ matrix.version }},-" check: name: Status Check - Keycloak CI @@ -1108,9 +1176,10 @@ jobs: - external-infinispan-tests - test-framework - base-new-integration-tests + - mixed-cluster-compatibility-tests runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/status-check with: jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index b51c2bb56208..afbd4e885440 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -22,8 +22,10 @@ defaults: run: shell: bash -jobs: +permissions: + contents: read +jobs: conditional: name: Check conditional workflows and jobs runs-on: ubuntu-latest @@ -31,8 +33,11 @@ jobs: java: ${{ steps.conditional.outputs.codeql-java }} javascript: ${{ steps.conditional.outputs.codeql-javascript }} typescript: ${{ steps.conditional.outputs.codeql-typescript }} + permissions: + contents: read + pull-requests: read steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: conditional uses: ./.github/actions/conditional @@ -43,15 +48,17 @@ jobs: name: CodeQL Java needs: conditional runs-on: ubuntu-latest + permissions: + security-events: write # Required for SARIF upload if: needs.conditional.outputs.java == 'true' outputs: conclusion: ${{ steps.check.outputs.conclusion }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: languages: java @@ -59,7 +66,7 @@ jobs: uses: ./.github/actions/build-keycloak - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: 
wait-for-processing: true env: @@ -69,22 +76,24 @@ jobs: name: CodeQL JavaScript needs: conditional runs-on: ubuntu-latest + permissions: + security-events: write # Required for SARIF upload if: needs.conditional.outputs.javascript == 'true' outputs: conclusion: ${{ steps.check.outputs.conclusion }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 env: CODEQL_ACTION_EXTRA_OPTIONS: '{"database":{"finalize":["--no-run-unnecessary-builds"]}}' with: languages: javascript - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: wait-for-processing: true env: @@ -94,22 +103,24 @@ jobs: name: CodeQL TypeScript needs: conditional runs-on: ubuntu-latest + permissions: + security-events: write # Required for SARIF upload if: needs.conditional.outputs.typescript == 'true' outputs: conclusion: ${{ steps.check.outputs.conclusion }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 env: CODEQL_ACTION_EXTRA_OPTIONS: '{"database":{"finalize":["--no-run-unnecessary-builds"]}}' with: languages: typescript - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: wait-for-processing: true env: @@ -125,7 +136,7 @@ jobs: - typescript runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/status-check with: jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 1843f08d6e7f..58aa4ab1e236 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -21,6 +21,9 @@ defaults: run: shell: bash +permissions: + contents: read + jobs: conditional: @@ -28,8 +31,11 @@ jobs: runs-on: ubuntu-latest outputs: documentation: ${{ steps.conditional.outputs.documentation }} + permissions: + contents: read + pull-requests: read steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: conditional uses: ./.github/actions/conditional @@ -42,7 +48,7 @@ jobs: runs-on: ubuntu-latest needs: conditional steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: setup-java name: Setup Java @@ -60,7 +66,7 @@ jobs: - id: upload-keycloak-documentation name: Upload Keycloak documentation - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: keycloak-documentation path: docs/documentation/dist/target/*.zip @@ -72,7 +78,7 @@ jobs: runs-on: ubuntu-latest needs: conditional steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: setup-java name: Setup Java @@ -96,7 +102,7 @@ jobs: - build runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: 
./.github/actions/status-check with: - jobs: ${{ toJSON(needs) }} \ No newline at end of file + jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/guides.yml b/.github/workflows/guides.yml index 720398a37e52..6a521254d6a3 100644 --- a/.github/workflows/guides.yml +++ b/.github/workflows/guides.yml @@ -21,6 +21,9 @@ defaults: run: shell: bash +permissions: + contents: read + jobs: conditional: @@ -29,8 +32,11 @@ jobs: outputs: guides: ${{ steps.conditional.outputs.guides }} ci: ${{ steps.conditional.outputs.ci }} + permissions: + contents: read + pull-requests: read steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: conditional uses: ./.github/actions/conditional @@ -44,7 +50,7 @@ jobs: runs-on: ubuntu-latest needs: conditional steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Build Keycloak uses: ./.github/actions/build-keycloak @@ -57,7 +63,7 @@ jobs: - build runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/status-check with: jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/js-ci.yml b/.github/workflows/js-ci.yml index 415fcdab895b..1a60b211986e 100644 --- a/.github/workflows/js-ci.yml +++ b/.github/workflows/js-ci.yml @@ -22,14 +22,20 @@ defaults: run: shell: bash +permissions: + contents: read + jobs: conditional: name: Check conditional workflows and jobs runs-on: ubuntu-latest outputs: js-ci: ${{ steps.conditional.outputs.js }} + permissions: + contents: read + pull-requests: read steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: conditional uses: ./.github/actions/conditional @@ -42,7 +48,7 @@ jobs: if: needs.conditional.outputs.js-ci == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Build Keycloak uses: ./.github/actions/build-keycloak @@ -52,7 +58,7 @@ jobs: mv ./quarkus/dist/target/keycloak-999.0.0-SNAPSHOT.tar.gz ./keycloak-999.0.0-SNAPSHOT.tar.gz - name: Upload Keycloak dist - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: keycloak path: keycloak-999.0.0-SNAPSHOT.tar.gz @@ -65,7 +71,7 @@ jobs: env: WORKSPACE: "@keycloak/keycloak-admin-client" steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/pnpm-setup @@ -83,7 +89,7 @@ jobs: env: WORKSPACE: "@keycloak/keycloak-ui-shared" steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/pnpm-setup @@ -101,7 +107,7 @@ jobs: env: WORKSPACE: "@keycloak/keycloak-account-ui" steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/pnpm-setup @@ -119,7 +125,7 @@ jobs: env: WORKSPACE: keycloak-admin-ui steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/pnpm-setup @@ -132,9 +138,6 @@ jobs: - run: pnpm --fail-if-no-match --filter ${{ env.WORKSPACE }} build working-directory: js - - run: pnpm --fail-if-no-match --filter ${{ env.WORKSPACE }} cy:check-types - working-directory: js - 
account-ui-e2e: name: Account UI E2E needs: @@ -145,12 +148,12 @@ jobs: env: WORKSPACE: "@keycloak/keycloak-account-ui" steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/pnpm-setup - name: Download Keycloak server - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 with: name: keycloak @@ -174,7 +177,7 @@ jobs: working-directory: js - name: Upload Playwright report - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: account-ui-playwright-report @@ -183,7 +186,7 @@ jobs: - name: Upload server logs if: always() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: account-ui-server-log path: ~/server.log @@ -216,26 +219,13 @@ jobs: WORKSPACE: keycloak-admin-ui strategy: matrix: - container: [1, 2, 3, 4, 5] - browser: [chrome, firefox] + browser: [chromium, firefox] exclude: # Only test with Firefox on scheduled runs - browser: ${{ github.event_name != 'workflow_dispatch' && 'firefox' || '' }} fail-fast: false steps: - - uses: actions/checkout@v4 - - - name: Install Google Chrome - if: matrix.browser == 'chrome' - uses: browser-actions/setup-chrome@v1 - with: - chrome-version: stable - - - name: Install Firefox - if: matrix.browser == 'firefox' - uses: browser-actions/setup-firefox@v1 - with: - firefox-version: latest + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/pnpm-setup @@ -244,7 +234,7 @@ jobs: working-directory: js - name: Download Keycloak server - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 with: name: keycloak @@ -254,45 +244,36 @@ jobs: - name: Start Keycloak server run: | tar xfvz keycloak-999.0.0-SNAPSHOT.tar.gz - keycloak-999.0.0-SNAPSHOT/bin/kc.sh start-dev --features=admin-fine-grained-authz:v1,transient-users &> ~/server.log & + keycloak-999.0.0-SNAPSHOT/bin/kc.sh start-dev --features=admin-fine-grained-authz:v2,transient-users &> ~/server.log & env: KC_BOOTSTRAP_ADMIN_USERNAME: admin KC_BOOTSTRAP_ADMIN_PASSWORD: admin KC_BOOTSTRAP_ADMIN_CLIENT_ID: temporary-admin-service KC_BOOTSTRAP_ADMIN_CLIENT_SECRET: temporary-admin-service - - name: Start LDAP server - run: pnpm --fail-if-no-match --filter ${{ env.WORKSPACE }} cy:ldap-server & + - name: Install Playwright browsers + run: pnpm --fail-if-no-match --filter ${{ env.WORKSPACE }} exec playwright install --with-deps + working-directory: js + + - name: Run Playwright tests + run: pnpm --fail-if-no-match --filter ${{ env.WORKSPACE }} test:integration --project=${{ matrix.browser }} working-directory: js - - name: Run Cypress - uses: cypress-io/github-action@v6 + - name: Upload Playwright report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + if: always() with: - install: false - browser: ${{ matrix.browser }} - wait-on: http://localhost:8080 - working-directory: js/apps/admin-ui - env: - SPLIT: ${{ strategy.job-total }} - SPLIT_INDEX: ${{ strategy.job-index }} - SPLIT_RANDOM_SEED: ${{ needs.generate-test-seed.outputs.seed }} + name: admin-ui-playwright-report-${{ matrix.browser }} + path: js/apps/admin-ui/playwright-report + retention-days: 30 - name: Upload server logs if: always() - uses: actions/upload-artifact@v4 + uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: - name: admin-ui-server-log-${{ matrix.container }}-${{ matrix.browser }} + name: admin-ui-server-log-${{ matrix.browser }} path: ~/server.log - - name: Upload Cypress videos - uses: actions/upload-artifact@v4 - if: always() && github.repository != 'keycloak/keycloak-private' - with: - name: cypress-videos-${{ matrix.container }}-${{ matrix.browser }} - path: js/apps/admin-ui/cypress/videos - if-no-files-found: ignore - retention-days: 10 - check: name: Status Check - Keycloak JavaScript CI if: always() @@ -307,7 +288,7 @@ jobs: - admin-ui-e2e runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/status-check with: jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml index c06af0ae583e..864874900f54 100644 --- a/.github/workflows/label.yml +++ b/.github/workflows/label.yml @@ -3,16 +3,17 @@ on: pull_request_target: types: closed +permissions: + contents: read + jobs: label: runs-on: ubuntu-latest permissions: - contents: read - issues: write - + issues: write # Required to add labels to Issues steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: sparse-checkout: .github/scripts - name: Add release labels on merge @@ -26,7 +27,7 @@ jobs: if [ "$GITHUB_BASE_REF" == "main" ]; then LAST_RELEASE="$(gh api /repos/$GITHUB_REPOSITORY/branches --paginate --jq .[].name | grep '^release/' | cut -d '/' -f 2 | sort -n -r | head -n 1)" - LAST_MINOR=$(echo $LAST_MINOR | cut -d '.' -f 2) + LAST_MINOR=$(echo $LAST_RELEASE | cut -d '.' -f 2) NEXT_MAJOR=$(echo $LAST_RELEASE | cut -d '.' 
-f 1) NEXT_MINOR="$(($LAST_MINOR + 1))" LABEL="release/$NEXT_MAJOR.$NEXT_MINOR.0" @@ -40,9 +41,9 @@ jobs: fi echo "Label: $LABEL" - echo "**Label:** [$LABEL](https://github.com/$GITHUB_REPOSITORY/labels/$(echo $LABEL | sed 's|/|%2F|g'))" >> $GITHUB_STEP_SUMMARY + echo "**Label:** [$LABEL](https://github.com/$GITHUB_REPOSITORY/labels/$LABEL)" >> $GITHUB_STEP_SUMMARY - gh api "repos/$GITHUB_REPOSITORY/labels/$(echo $LABEL | sed 's|/|%2F|g')" --silent 2>/dev/null || gh label create -R "$GITHUB_REPOSITORY" "$LABEL" -c "0E8A16" + gh api "/repos/$GITHUB_REPOSITORY/labels/$LABEL" --silent 2>/dev/null || gh label create -R "$GITHUB_REPOSITORY" "$LABEL" -c "0E8A16" echo "" echo "" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/operator-ci.yml b/.github/workflows/operator-ci.yml index 078c4d03000e..0b813498fb34 100644 --- a/.github/workflows/operator-ci.yml +++ b/.github/workflows/operator-ci.yml @@ -23,6 +23,9 @@ concurrency: group: operator-ci-${{ github.ref }} cancel-in-progress: true +permissions: + contents: read + jobs: conditional: @@ -30,8 +33,11 @@ jobs: runs-on: ubuntu-latest outputs: operator: ${{ steps.conditional.outputs.operator }} + permissions: + contents: read + pull-requests: read steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: conditional uses: ./.github/actions/conditional @@ -44,7 +50,7 @@ jobs: runs-on: ubuntu-latest needs: conditional steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Build Keycloak uses: ./.github/actions/build-keycloak @@ -52,56 +58,29 @@ jobs: upload-m2-repo: false upload-dist: true - test-local: - name: Test local + test-local-apiserver: + name: Test local apiserver runs-on: ubuntu-latest needs: [build] steps: - - uses: actions/checkout@v4 - - - name: Set version - id: vars - run: echo "version_local=0.0.1-${GITHUB_SHA::6}" >> $GITHUB_ENV + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Setup Java uses: ./.github/actions/java-setup - - name: Setup Minikube-Kubernetes - uses: manusa/actions-setup-minikube@v2.13.0 - with: - minikube version: ${{ env.MINIKUBE_VERSION }} - kubernetes version: ${{ env.KUBERNETES_VERSION }} - github token: ${{ secrets.GITHUB_TOKEN }} - driver: docker - start args: --addons=ingress --memory=${{ env.MINIKUBE_MEMORY }} --cni cilium --cpus=max - - - name: Download keycloak distribution - id: download-keycloak-dist - uses: actions/download-artifact@v4 - with: - name: keycloak-dist - path: quarkus/container - - - name: Build Keycloak Docker images - run: | - eval $(minikube -p minikube docker-env) - (cd quarkus/container && docker build --build-arg KEYCLOAK_DIST=$(ls keycloak-*.tar.gz) . 
-t keycloak:${{ env.version_local }}) - (cd operator && ./scripts/build-testing-docker-images.sh ${{ env.version_local }} keycloak custom-keycloak) - - name: Test operator running locally run: | - ./mvnw install -Poperator -pl :keycloak-operator -am \ - -Dquarkus.kubernetes.image-pull-policy=IfNotPresent \ - -Dkc.operator.keycloak.image=keycloak:${{ env.version_local }} \ - -Dtest.operator.custom.image=custom-keycloak:${{ env.version_local }} \ - -Dkc.operator.keycloak.image-pull-policy=Never + ./mvnw install -Poperator -pl :keycloak-operator -am test-remote: name: Test remote runs-on: ubuntu-latest needs: [build] + strategy: + matrix: + suite: [slow, fast] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Set version id: vars @@ -111,7 +90,7 @@ jobs: uses: ./.github/actions/java-setup - name: Setup Minikube-Kubernetes - uses: manusa/actions-setup-minikube@v2.13.0 + uses: manusa/actions-setup-minikube@b589f2d61bf96695c546929c72b38563e856059d # v2.14.0 with: minikube version: ${{ env.MINIKUBE_VERSION }} kubernetes version: ${{ env.KUBERNETES_VERSION }} @@ -121,7 +100,7 @@ jobs: - name: Download keycloak distribution id: download-keycloak-dist - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 with: name: keycloak-dist path: quarkus/container @@ -134,6 +113,10 @@ jobs: - name: Test operator running in cluster run: | + declare -A PARAMS + PARAMS["slow"]="-Dkc.quarkus.tests.groups=slow" + PARAMS["fast"]='-Dkc.quarkus.tests.groups=!slow' + eval $(minikube -p minikube docker-env) ./mvnw install -Poperator -pl :keycloak-operator -am \ -Dquarkus.container-image.build=true \ @@ -141,29 +124,29 @@ jobs: -Dkc.operator.keycloak.image=keycloak:${{ env.version_remote }} \ -Dquarkus.kubernetes.env.vars.kc-operator-keycloak-image-pull-policy=Never \ -Dtest.operator.custom.image=custom-keycloak:${{ env.version_remote }} \ - --no-transfer-progress -Dtest.operator.deployment=remote + --no-transfer-progress -Dtest.operator.deployment=remote ${PARAMS["${{ matrix.suite }}"]} test-olm: name: Test OLM installation runs-on: ubuntu-latest needs: [build] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Setup Java uses: ./.github/actions/java-setup - name: Setup Minikube-Kubernetes - uses: manusa/actions-setup-minikube@v2.13.0 + uses: manusa/actions-setup-minikube@b589f2d61bf96695c546929c72b38563e856059d # v2.14.0 with: minikube version: ${{ env.MINIKUBE_VERSION }} kubernetes version: ${{ env.KUBERNETES_VERSION }} github token: ${{ secrets.GITHUB_TOKEN }} driver: docker - start args: --memory=${{ env.MINIKUBE_MEMORY }} + start args: --memory=${{ env.MINIKUBE_MEMORY }} --addons=registry --insecure-registry=192.168.49.0/24 - name: Install OPM - uses: redhat-actions/openshift-tools-installer@v1 + uses: redhat-actions/openshift-tools-installer@144527c7d98999f2652264c048c7a9bd103f8a82 # v1.13.1 with: source: github opm: 1.21.0 @@ -177,7 +160,7 @@ jobs: - name: Download keycloak distribution id: download-keycloak-dist - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 with: name: keycloak-dist path: quarkus/container @@ -185,8 +168,9 @@ jobs: - name: Arrange OLM test installation working-directory: operator run: | + echo "Minikube IP $(minikube ip)" eval $(minikube -p minikube docker-env) - ./scripts/olm-testing.sh ${GITHUB_SHA::6} + 
REGISTRY=$(minikube ip):5000 ./scripts/olm-testing.sh ${GITHUB_SHA::6} - name: Deploy an example Keycloak and wait for it to be ready working-directory: operator @@ -233,12 +217,12 @@ jobs: needs: - conditional - build - - test-local + - test-local-apiserver - test-remote - test-olm runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./.github/actions/status-check with: jobs: ${{ toJSON(needs) }} diff --git a/.github/workflows/quarkus-next.yml b/.github/workflows/quarkus-next.yml index ec235fbf2216..532e662b83ba 100644 --- a/.github/workflows/quarkus-next.yml +++ b/.github/workflows/quarkus-next.yml @@ -14,14 +14,18 @@ concurrency: group: quarkus-next-${{ github.ref }} cancel-in-progress: true +permissions: + contents: read + jobs: update-quarkus-next-branch: name: Update quarkus-next branch if: github.event_name != 'schedule' || github.repository == 'keycloak/keycloak' runs-on: ubuntu-latest - + permissions: + contents: write # Required to push changes to the repository steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: main fetch-depth: 0 @@ -42,6 +46,8 @@ jobs: run-matrix-with-quarkus-next: name: Run workflow matrix with the quarkus-next branch runs-on: ubuntu-latest + permissions: + actions: write # Required to trigger workflows using gh needs: - update-quarkus-next-branch diff --git a/.github/workflows/schedule-nightly.yml b/.github/workflows/schedule-nightly.yml index a3c93a62013b..47f9396bf932 100644 --- a/.github/workflows/schedule-nightly.yml +++ b/.github/workflows/schedule-nightly.yml @@ -5,11 +5,15 @@ on: - cron: '0 0 * * *' workflow_dispatch: -jobs: +permissions: + contents: read +jobs: setup: if: github.event_name != 'schedule' || github.repository == 'keycloak/keycloak' runs-on: ubuntu-latest + permissions: + actions: write # Required to trigger workflows using gh outputs: latest-release-branch: ${{ steps.latest-release.outputs.branch }} steps: @@ -24,8 +28,9 @@ jobs: run-default-branch: name: Run default branch runs-on: ubuntu-latest + permissions: + actions: write # Required to trigger workflows using gh needs: setup - strategy: matrix: workflow: @@ -47,7 +52,8 @@ jobs: name: Run latest release branch needs: setup runs-on: ubuntu-latest - + permissions: + actions: write # Required to trigger workflows using gh strategy: matrix: workflow: diff --git a/.github/workflows/snyk-analysis.yml b/.github/workflows/snyk-analysis.yml index 3d484338805e..761b69512325 100644 --- a/.github/workflows/snyk-analysis.yml +++ b/.github/workflows/snyk-analysis.yml @@ -10,18 +10,23 @@ defaults: run: shell: bash +permissions: + contents: read + jobs: analysis: name: Analysis of Quarkus and Operator runs-on: ubuntu-latest if: github.repository == 'keycloak/keycloak' + permissions: + security-events: write # Required for SARIF uploads steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Build Keycloak uses: ./.github/actions/build-keycloak - - uses: snyk/actions/setup@master + - uses: snyk/actions/setup@28606799782bc8e809f4076e9f8293bc4212d05e # master - name: Check for vulnerabilities in Quarkus run: snyk test --policy-path=${GITHUB_WORKSPACE}/.github/snyk/.snyk --all-projects --prune-repeated-subdependencies --exclude=tests --sarif-file-output=quarkus-report.sarif quarkus/deployment @@ -30,7 +35,7 @@ jobs: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} - name: Upload 
Quarkus scanner results to GitHub - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 continue-on-error: true with: sarif_file: quarkus-report.sarif @@ -45,7 +50,7 @@ jobs: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} - name: Upload Operator scanner results to GitHub - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: sarif_file: operator-report.sarif category: snyk-operator-report diff --git a/.github/workflows/trivy-analysis.yml b/.github/workflows/trivy-analysis.yml index be9559f8bf58..ad8e25b9e3cb 100644 --- a/.github/workflows/trivy-analysis.yml +++ b/.github/workflows/trivy-analysis.yml @@ -7,6 +7,9 @@ defaults: run: shell: bash +permissions: + contents: read + jobs: analysis: @@ -17,9 +20,14 @@ jobs: matrix: container: [keycloak, keycloak-operator] fail-fast: false + permissions: + security-events: write # Required for SARIF uploads steps: + - name: Checkout code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@0.29.0 + uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4 # 0.32.0 with: image-ref: quay.io/keycloak/${{ matrix.container }}:nightly format: sarif @@ -33,7 +41,7 @@ jobs: TRIVY_JAVA_DB_REPOSITORY: public.ecr.aws/aquasecurity/trivy-java-db - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: sarif_file: trivy-results.sarif category: ${{ matrix.container }} diff --git a/.github/workflows/weblate.yml b/.github/workflows/weblate.yml index 98c5de4c3f6d..ba569f04e72f 100644 --- a/.github/workflows/weblate.yml +++ b/.github/workflows/weblate.yml @@ -22,6 +22,9 @@ concurrency: group: weblate-${{ github.ref }} cancel-in-progress: true +permissions: + contents: read + jobs: update-weblate: name: Trigger Weblate to pull the latest changes diff --git a/.gitignore b/.gitignore index 750fb42afd14..b68f39bdeaef 100644 --- a/.gitignore +++ b/.gitignore @@ -96,3 +96,4 @@ quarkus/data/*.db .java-version .env +.env.test diff --git a/ADOPTERS.md b/ADOPTERS.md index 04f5a8454cab..00f9e50dd23e 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -50,6 +50,8 @@ List of organization names below is based on information collected using Keycloa * Prodesan * Quest Software * Research Industrial Software Engineering (RISE) +* [SICK AG](https://www.sick.com) +* [SMF](https://www.smf.de) * Sportsbet.com.au * [Stacklok](https://stacklok.com/) * Stack Labs @@ -61,4 +63,5 @@ List of organization names below is based on information collected using Keycloa * TRT9 - Brasil * UnitedHealthcare * Wayfair LLC +* [Xata](https://xata.io) * ...More individuals diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 173dade71e87..a9aa88dc22ed 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -3,7 +3,6 @@ * [Alexander Schwartz](https://github.com/ahus1) * [Bruno Oliveira da Silva](https://github.com/abstractj) * [Marek Posolda](https://github.com/mposolda) -* [Michal Hajas](https://github.com/mhajas) * [Pedro Igor](https://github.com/pedroigor) * [Sebastian Schuster](https://github.com/sschu) * [Stan Silvert](https://github.com/ssilvert) @@ -15,5 +14,6 @@ # Emeritus maintainers * [Hynek Mlnařík](https://github.com/hmlnarik) +* [Michal Hajas](https://github.com/mhajas) * [Pavel 
Drozd](https://github.com/pdrozd) diff --git a/README.md b/README.md index 36998a9327f1..c9856046d433 100644 --- a/README.md +++ b/README.md @@ -2,10 +2,12 @@ ![GitHub Release](https://img.shields.io/github/v/release/keycloak/keycloak?label=latest%20release) [![OpenSSF Best Practices](https://bestpractices.coreinfrastructure.org/projects/6818/badge)](https://bestpractices.coreinfrastructure.org/projects/6818) +[![CLOMonitor](https://img.shields.io/endpoint?url=https://clomonitor.io/api/projects/cncf/keycloak/badge)](https://clomonitor.io/projects/cncf/keycloak) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/keycloak/keycloak/badge)](https://securityscorecards.dev/viewer/?uri=github.com/keycloak/keycloak) [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/keycloak-operator)](https://artifacthub.io/packages/olm/community-operators/keycloak-operator) ![GitHub Repo stars](https://img.shields.io/github/stars/keycloak/keycloak?style=flat) ![GitHub commit activity](https://img.shields.io/github/commit-activity/m/keycloak/keycloak) -[![Translation status](https://hosted.weblate.org/widget/keycloak/svg-badge.svg)](https://hosted.weblate.org/engage/keycloak/) +[![Translation status](https://hosted.weblate.org/widget/keycloak/svg-badge.svg)](docs/translation.md) # Open Source Identity and Access Management @@ -18,6 +20,7 @@ Keycloak provides user federation, strong authentication, user management, fine- * [Documentation](https://www.keycloak.org/documentation.html) * [User Mailing List](https://groups.google.com/d/forum/keycloak-user) - Mailing list for help and general questions about Keycloak +* Join [#keycloak](https://cloud-native.slack.com/archives/C056HC17KK9) for general questions, or [#keycloak-dev](https://cloud-native.slack.com/archives/C056XU905S6) on Slack for design and development discussions, by creating an account at [https://slack.cncf.io/](https://slack.cncf.io/). ## Reporting Security Vulnerabilities diff --git a/adapters/saml/core/src/main/java/org/keycloak/adapters/saml/rotation/SamlDescriptorPublicKeyLocator.java b/adapters/saml/core/src/main/java/org/keycloak/adapters/saml/rotation/SamlDescriptorPublicKeyLocator.java index d8f9d7356c33..febaee398da7 100644 --- a/adapters/saml/core/src/main/java/org/keycloak/adapters/saml/rotation/SamlDescriptorPublicKeyLocator.java +++ b/adapters/saml/core/src/main/java/org/keycloak/adapters/saml/rotation/SamlDescriptorPublicKeyLocator.java @@ -24,7 +24,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import javax.security.auth.x500.X500Principal; import javax.xml.crypto.dsig.keyinfo.KeyInfo; @@ -34,6 +33,7 @@ import org.keycloak.adapters.cloned.HttpAdapterUtils; import org.keycloak.adapters.cloned.HttpClientAdapterException; import org.keycloak.common.util.MultivaluedHashMap; +import org.keycloak.common.util.SecretGenerator; import org.keycloak.common.util.Time; import org.keycloak.dom.saml.v2.metadata.KeyTypes; import org.keycloak.rotation.KeyLocator; @@ -179,7 +179,7 @@ private synchronized Key refreshCertificateCacheAndGet(T key, Map ca this.publicKeyCacheByKey.put(new KeyHash(x509certificate.getPublicKey()), x509certificate.getPublicKey()); } else { final X500Principal principal = x509certificate.getSubjectX500Principal(); - String name = (principal == null ? 
"unnamed" : principal.getName()) + "@" + x509certificate.getSerialNumber() + "$" + UUID.randomUUID(); + String name = (principal == null ? "unnamed" : principal.getName()) + "@" + x509certificate.getSerialNumber() + "$" + SecretGenerator.getInstance().generateSecureID(); this.publicKeyCacheByName.put(name, x509certificate.getPublicKey()); this.publicKeyCacheByKey.put(new KeyHash(x509certificate.getPublicKey()), x509certificate.getPublicKey()); LOG.tracef("Adding certificate %s without a specific key name: %s", name, x509certificate); diff --git a/authz/client/src/main/java/org/keycloak/authorization/client/ResourceNotFoundException.java b/authz/client/src/main/java/org/keycloak/authorization/client/ResourceNotFoundException.java new file mode 100644 index 000000000000..c418ba9ac29d --- /dev/null +++ b/authz/client/src/main/java/org/keycloak/authorization/client/ResourceNotFoundException.java @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Red Hat, Inc. and/or its affiliates + * and other contributors as indicated by the @author tags. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.keycloak.authorization.client; + +/** + * @author Pedro Igor + */ +public class ResourceNotFoundException extends RuntimeException { + + public ResourceNotFoundException(Throwable cause) { + super(cause); + } + + public ResourceNotFoundException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/authz/client/src/main/java/org/keycloak/authorization/client/util/Throwables.java b/authz/client/src/main/java/org/keycloak/authorization/client/util/Throwables.java index 016daf28787b..00121f42fcfe 100644 --- a/authz/client/src/main/java/org/keycloak/authorization/client/util/Throwables.java +++ b/authz/client/src/main/java/org/keycloak/authorization/client/util/Throwables.java @@ -19,6 +19,7 @@ import java.util.concurrent.Callable; import org.keycloak.authorization.client.AuthorizationDeniedException; +import org.keycloak.authorization.client.ResourceNotFoundException; import org.keycloak.authorization.client.representation.TokenIntrospectionResponse; /** @@ -85,6 +86,8 @@ public static V retryAndWrapExceptionIfNecessary(Callable callable, Token } throw handleWrapException(message, cause); + } else if (httpe.getStatusCode() == 400 && new String(httpe.getBytes()).contains("invalid_resource_id")) { + throw new ResourceNotFoundException(message, cause); } } diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/aggregated/AggregatePolicyProviderFactory.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/aggregated/AggregatePolicyProviderFactory.java index 476d1823aee5..eb8d07e9f712 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/aggregated/AggregatePolicyProviderFactory.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/aggregated/AggregatePolicyProviderFactory.java @@ -54,7 +54,7 @@ public PolicyProvider 
create(AuthorizationProvider authorization) { @Override public PolicyProvider create(KeycloakSession session) { - return null; + return provider; } @Override diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/client/ClientPolicyProvider.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/client/ClientPolicyProvider.java index b77fb2f79f93..87680a34a655 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/client/ClientPolicyProvider.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/client/ClientPolicyProvider.java @@ -30,11 +30,12 @@ public void evaluate(Evaluation evaluation) { for (String client : representation.getClients()) { ClientModel clientModel = realm.getClientById(client); - - if (context.getAttributes().containsValue("kc.client.id", clientModel.getClientId())) { - evaluation.grant(); - logger.debugf("Client policy %s matched with client %s and was granted", evaluation.getPolicy().getName(), clientModel.getClientId()); - return; + if (clientModel != null) { + if (context.getAttributes().containsValue("kc.client.id", clientModel.getClientId())) { + evaluation.grant(); + logger.debugf("Client policy %s matched with client %s and was granted", evaluation.getPolicy().getName(), clientModel.getClientId()); + return; + } } } } diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/client/ClientPolicyProviderFactory.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/client/ClientPolicyProviderFactory.java index bf63e2a82c08..15ada99cd943 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/client/ClientPolicyProviderFactory.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/client/ClientPolicyProviderFactory.java @@ -108,7 +108,7 @@ public void onExport(Policy policy, PolicyRepresentation representation, Authori @Override public PolicyProvider create(KeycloakSession session) { - return null; + return provider; } @Override diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/group/GroupPolicyProvider.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/group/GroupPolicyProvider.java index 91d47e11d7cf..9813acc1252a 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/group/GroupPolicyProvider.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/group/GroupPolicyProvider.java @@ -20,22 +20,33 @@ import java.util.List; import java.util.function.BiFunction; +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.jboss.logging.Logger; import org.keycloak.authorization.AuthorizationProvider; import org.keycloak.authorization.attribute.Attributes; import org.keycloak.authorization.attribute.Attributes.Entry; import org.keycloak.authorization.model.Policy; +import org.keycloak.authorization.model.ResourceServer; import org.keycloak.authorization.policy.evaluation.Evaluation; +import org.keycloak.authorization.fgap.evaluation.partial.PartialEvaluationPolicyProvider; import org.keycloak.authorization.policy.provider.PolicyProvider; +import org.keycloak.authorization.store.PolicyStore; +import org.keycloak.authorization.store.StoreFactory; +import org.keycloak.models.ClientModel; import org.keycloak.models.GroupModel; +import 
org.keycloak.models.KeycloakSession; import org.keycloak.models.RealmModel; +import org.keycloak.models.UserModel; +import org.keycloak.models.utils.ModelToRepresentation; import org.keycloak.representations.idm.authorization.GroupPolicyRepresentation; +import org.keycloak.representations.idm.authorization.ResourceType; /** * @author Pedro Igor */ -public class GroupPolicyProvider implements PolicyProvider { +public class GroupPolicyProvider implements PolicyProvider, PartialEvaluationPolicyProvider { private static final Logger logger = Logger.getLogger(GroupPolicyProvider.class); private final BiFunction representationFunction; @@ -56,6 +67,14 @@ public void evaluate(Evaluation evaluation) { groupsClaim = new Entry(policy.getGroupsClaim(), userGroups); } + if (isGranted(realm, policy, groupsClaim)) { + evaluation.grant(); + } + + logger.debugf("Groups policy %s evaluated to %s with identity groups %s", policy.getName(), evaluation.getEffect(), groupsClaim); + } + + private boolean isGranted(RealmModel realm, GroupPolicyRepresentation policy, Attributes.Entry groupsClaim) { for (GroupPolicyRepresentation.GroupDefinition definition : policy.getGroups()) { GroupModel allowedGroup = realm.getGroupById(definition.getId()); @@ -69,19 +88,46 @@ public void evaluate(Evaluation evaluation) { if (group.indexOf('/') != -1) { String allowedGroupPath = buildGroupPath(allowedGroup); if (group.equals(allowedGroupPath) || (definition.isExtendChildren() && group.startsWith(allowedGroupPath))) { - evaluation.grant(); - return; + return true; } } // in case the group from the claim does not represent a path, we just check an exact name match if (group.equals(allowedGroup.getName())) { - evaluation.grant(); - return; + return true; } } } - logger.debugf("Groups policy %s evaluated to %s with identity groups %s", policy.getName(), evaluation.getEffect(), groupsClaim); + + return false; + } + + @Override + public Stream getPermissions(KeycloakSession session, ResourceType resourceType, UserModel user) { + AuthorizationProvider provider = session.getProvider(AuthorizationProvider.class); + RealmModel realm = session.getContext().getRealm(); + ClientModel adminPermissionsClient = realm.getAdminPermissionsClient(); + StoreFactory storeFactory = provider.getStoreFactory(); + ResourceServer resourceServer = storeFactory.getResourceServerStore().findByClient(adminPermissionsClient); + PolicyStore policyStore = storeFactory.getPolicyStore(); + List groupIds = user.getGroupsStream().map(GroupModel::getId).toList(); + + return policyStore.findDependentPolicies(resourceServer, resourceType.getType(), GroupPolicyProviderFactory.ID, "groups", groupIds); + } + + @Override + public boolean evaluate(KeycloakSession session, Policy policy, UserModel subject) { + RealmModel realm = session.getContext().getRealm(); + AuthorizationProvider authorizationProvider = session.getProvider(AuthorizationProvider.class); + GroupPolicyRepresentation groupPolicy = representationFunction.apply(policy, authorizationProvider); + List userGroups = subject.getGroupsStream().map(ModelToRepresentation::buildGroupPath) + .collect(Collectors.toList()); + return isGranted(realm, groupPolicy, new Entry(groupPolicy.getGroupsClaim(), userGroups)); + } + + @Override + public boolean supports(Policy policy) { + return GroupPolicyProviderFactory.ID.equals(policy.getType()); } @Override diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/group/GroupPolicyProviderFactory.java 
b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/group/GroupPolicyProviderFactory.java index 30fd4f9acb6b..b636ddca2a9c 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/group/GroupPolicyProviderFactory.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/group/GroupPolicyProviderFactory.java @@ -48,11 +48,13 @@ */ public class GroupPolicyProviderFactory implements PolicyProviderFactory { + public static final String ID = "group"; + private GroupPolicyProvider provider = new GroupPolicyProvider(this::toRepresentation); @Override public String getId() { - return "group"; + return ID; } @Override diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/js/JSPolicyProviderFactory.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/js/JSPolicyProviderFactory.java index bc538a1b658a..7a901e7b7db4 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/js/JSPolicyProviderFactory.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/js/JSPolicyProviderFactory.java @@ -39,7 +39,7 @@ public PolicyProvider create(AuthorizationProvider authorization) { @Override public PolicyProvider create(KeycloakSession session) { - return null; + return provider; } @Override diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/permission/ResourcePolicyProviderFactory.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/permission/ResourcePolicyProviderFactory.java index 77b6ec17ef31..51f45c463320 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/permission/ResourcePolicyProviderFactory.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/permission/ResourcePolicyProviderFactory.java @@ -64,7 +64,7 @@ public ResourcePermissionRepresentation toRepresentation(Policy policy, Authoriz @Override public PolicyProvider create(KeycloakSession session) { - return null; + return provider; } @Override diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/permission/ScopePolicyProviderFactory.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/permission/ScopePolicyProviderFactory.java index 0be56fcfa556..4dc3eb6ccd74 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/permission/ScopePolicyProviderFactory.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/permission/ScopePolicyProviderFactory.java @@ -17,6 +17,7 @@ package org.keycloak.authorization.policy.provider.permission; import org.keycloak.Config; +import org.keycloak.authorization.fgap.AdminPermissionsSchema; import org.keycloak.authorization.AuthorizationProvider; import org.keycloak.authorization.model.Policy; import org.keycloak.authorization.policy.provider.PolicyProvider; @@ -33,7 +34,8 @@ */ public class ScopePolicyProviderFactory implements PolicyProviderFactory { - private ScopePolicyProvider provider = new ScopePolicyProvider(); + public static final String ID = "scope"; + private final ScopePolicyProvider provider = new ScopePolicyProvider(); @Override public String getName() { @@ -52,7 +54,7 @@ public PolicyProvider create(AuthorizationProvider authorization) { @Override public PolicyProvider create(KeycloakSession session) { - return null; + 
return provider; } @Override @@ -77,9 +79,14 @@ public void onUpdate(Policy policy, ScopePermissionRepresentation representation updateResourceType(policy, representation); } + @Override + public void onRemove(Policy policy, AuthorizationProvider authorization) { + AdminPermissionsSchema.SCHEMA.removeOrphanResources(policy, authorization); + } + private void updateResourceType(Policy policy, ScopePermissionRepresentation representation) { if (representation != null) { - Map config = new HashMap(policy.getConfig()); + Map config = new HashMap<>(policy.getConfig()); config.compute("defaultResourceType", (key, value) -> { String resourceType = representation.getResourceType(); @@ -107,6 +114,6 @@ public void close() { @Override public String getId() { - return "scope"; + return ID; } } diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/role/RolePolicyProvider.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/role/RolePolicyProvider.java index c73371f7dfbb..4d0342555761 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/role/RolePolicyProvider.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/role/RolePolicyProvider.java @@ -17,16 +17,23 @@ */ package org.keycloak.authorization.policy.provider.role; +import java.util.List; import java.util.Set; import java.util.function.BiFunction; +import java.util.stream.Stream; import org.jboss.logging.Logger; import org.keycloak.authorization.AuthorizationProvider; import org.keycloak.authorization.attribute.Attributes.Entry; import org.keycloak.authorization.identity.Identity; +import org.keycloak.authorization.identity.UserModelIdentity; import org.keycloak.authorization.model.Policy; +import org.keycloak.authorization.model.ResourceServer; import org.keycloak.authorization.policy.evaluation.Evaluation; +import org.keycloak.authorization.fgap.evaluation.partial.PartialEvaluationPolicyProvider; import org.keycloak.authorization.policy.provider.PolicyProvider; +import org.keycloak.authorization.store.PolicyStore; +import org.keycloak.authorization.store.StoreFactory; import org.keycloak.models.ClientModel; import org.keycloak.models.KeycloakSession; import org.keycloak.models.RealmModel; @@ -34,12 +41,15 @@ import org.keycloak.models.UserModel; import org.keycloak.models.UserProvider; import org.keycloak.representations.JsonWebToken; +import org.keycloak.representations.idm.authorization.ResourceType; import org.keycloak.representations.idm.authorization.RolePolicyRepresentation; +import static org.keycloak.models.utils.RoleUtils.getDeepUserRoleMappings; + /** * @author Pedro Igor */ -public class RolePolicyProvider implements PolicyProvider { +public class RolePolicyProvider implements PolicyProvider, PartialEvaluationPolicyProvider { private final BiFunction representationFunction; @@ -53,11 +63,21 @@ public RolePolicyProvider(BiFunction roleIds = policyRep.getRoles(); AuthorizationProvider authorizationProvider = evaluation.getAuthorizationProvider(); RealmModel realm = authorizationProvider.getKeycloakSession().getContext().getRealm(); Identity identity = evaluation.getContext().getIdentity(); + if (isGranted(realm, authorizationProvider, policyRep, identity)) { + evaluation.grant(); + } + + logger.debugf("policy %s evaluated with status %s on identity %s", policy.getName(), evaluation.getEffect(), identity.getId()); + } + + private boolean isGranted(RealmModel realm, AuthorizationProvider 
authorizationProvider, RolePolicyRepresentation policyRep, Identity identity) { + Set roleIds = policyRep.getRoles(); + boolean granted = false; + for (RolePolicyRepresentation.RoleDefinition roleDefinition : roleIds) { RoleModel role = realm.getRoleById(roleDefinition.getId()); @@ -66,14 +86,14 @@ public void evaluate(Evaluation evaluation) { boolean hasRole = hasRole(identity, role, realm, authorizationProvider, isFetchRoles); if (!hasRole && roleDefinition.isRequired() != null && roleDefinition.isRequired()) { - evaluation.deny(); - return; + return false; } else if (hasRole) { - evaluation.grant(); + granted = true; } } } - logger.debugf("policy %s evaluated with status %s on identity %s", policy.getName(), evaluation.getEffect(), identity.getId()); + + return granted; } private boolean hasRole(Identity identity, RoleModel role, RealmModel realm, AuthorizationProvider authorizationProvider, boolean fetchRoles) { @@ -111,4 +131,30 @@ private UserModel getSubject(Identity identity, RealmModel realm, AuthorizationP public void close() { } + + @Override + public Stream getPermissions(KeycloakSession session, ResourceType resourceType, UserModel subject) { + AuthorizationProvider provider = session.getProvider(AuthorizationProvider.class); + RealmModel realm = session.getContext().getRealm(); + ClientModel adminPermissionsClient = realm.getAdminPermissionsClient(); + StoreFactory storeFactory = provider.getStoreFactory(); + ResourceServer resourceServer = storeFactory.getResourceServerStore().findByClient(adminPermissionsClient); + PolicyStore policyStore = storeFactory.getPolicyStore(); + List roleIds = getDeepUserRoleMappings(subject).stream().map(RoleModel::getId).toList(); + Stream policies = Stream.of(); + + return Stream.concat(policies, policyStore.findDependentPolicies(resourceServer, resourceType.getType(), RolePolicyProviderFactory.ID, "roles", roleIds)); + } + + @Override + public boolean evaluate(KeycloakSession session, Policy policy, UserModel adminUser) { + RealmModel realm = session.getContext().getRealm(); + AuthorizationProvider authorizationProvider = session.getProvider(AuthorizationProvider.class); + return isGranted(realm, authorizationProvider, representationFunction.apply(policy, authorizationProvider), new UserModelIdentity(realm, adminUser)); + } + + @Override + public boolean supports(Policy policy) { + return RolePolicyProviderFactory.ID.equals(policy.getType()); + } } diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/role/RolePolicyProviderFactory.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/role/RolePolicyProviderFactory.java index 8b9e3c1c9653..bd66d0b478c2 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/role/RolePolicyProviderFactory.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/role/RolePolicyProviderFactory.java @@ -42,6 +42,7 @@ import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; +import java.util.regex.Pattern; import java.util.stream.Collectors; /** @@ -49,6 +50,7 @@ */ public class RolePolicyProviderFactory implements PolicyProviderFactory { + public static final String ID = "role"; private RolePolicyProvider provider = new RolePolicyProvider(this::toRepresentation); @Override @@ -192,7 +194,7 @@ public void close() { @Override public String getId() { - return "role"; + return ID; } private Set getRoles(String rawRoles, RealmModel realm) { @@ -210,6 +212,8 @@ private 
Set getRoles(String rawRoles, RealmModel realm) { return Collections.emptySet(); } + public static final Pattern UUID_PATTERN = Pattern.compile("[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}"); + private RoleModel getRole(RolePolicyRepresentation.RoleDefinition definition, RealmModel realm) { String roleName = definition.getId(); String clientId = null; @@ -223,10 +227,13 @@ private RoleModel getRole(RolePolicyRepresentation.RoleDefinition definition, Re RoleModel role; if (clientId == null) { - role = realm.getRole(roleName); + // if the role name looks like a UUID, it is likely that it is a role ID. Then do this look-up first to avoid hitting the database twice + // TODO: In a future version of the auth feature, make this more strict to avoid the double lookup and any ambiguity + boolean looksLikeAUuid = UUID_PATTERN.matcher(roleName).matches(); + role = looksLikeAUuid ? realm.getRoleById(roleName) : realm.getRole(roleName); if (role == null) { - role = realm.getRoleById(roleName); + role = !looksLikeAUuid ? realm.getRoleById(roleName) : realm.getRole(roleName); } } else { ClientModel client = realm.getClientByClientId(clientId); diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/time/TimePolicyProviderFactory.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/time/TimePolicyProviderFactory.java index 90b86ad0289b..e83a0fe269dd 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/time/TimePolicyProviderFactory.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/time/TimePolicyProviderFactory.java @@ -56,7 +56,7 @@ public PolicyProvider create(AuthorizationProvider authorization) { @Override public PolicyProvider create(KeycloakSession session) { - return null; + return provider; } @Override diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/user/UserPolicyProvider.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/user/UserPolicyProvider.java index 3ec56cf48d3a..5f75c26afa3b 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/user/UserPolicyProvider.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/user/UserPolicyProvider.java @@ -18,19 +18,28 @@ package org.keycloak.authorization.policy.provider.user; import java.util.function.BiFunction; +import java.util.stream.Stream; import org.jboss.logging.Logger; import org.keycloak.authorization.AuthorizationProvider; import org.keycloak.authorization.model.Policy; +import org.keycloak.authorization.model.ResourceServer; import org.keycloak.authorization.policy.evaluation.Evaluation; -import org.keycloak.authorization.policy.evaluation.EvaluationContext; +import org.keycloak.authorization.fgap.evaluation.partial.PartialEvaluationPolicyProvider; import org.keycloak.authorization.policy.provider.PolicyProvider; +import org.keycloak.authorization.store.PolicyStore; +import org.keycloak.authorization.store.StoreFactory; +import org.keycloak.models.ClientModel; +import org.keycloak.models.KeycloakSession; +import org.keycloak.models.RealmModel; +import org.keycloak.models.UserModel; +import org.keycloak.representations.idm.authorization.ResourceType; import org.keycloak.representations.idm.authorization.UserPolicyRepresentation; /** * @author Pedro Igor */ -public class UserPolicyProvider implements PolicyProvider { +public class 
UserPolicyProvider implements PolicyProvider, PartialEvaluationPolicyProvider { private static final Logger logger = Logger.getLogger(UserPolicyProvider.class); @@ -42,16 +51,37 @@ public UserPolicyProvider(BiFunction getPermissions(KeycloakSession session, ResourceType resourceType, UserModel subject) { + AuthorizationProvider provider = session.getProvider(AuthorizationProvider.class); + RealmModel realm = session.getContext().getRealm(); + ClientModel adminPermissionsClient = realm.getAdminPermissionsClient(); + StoreFactory storeFactory = provider.getStoreFactory(); + ResourceServer resourceServer = storeFactory.getResourceServerStore().findByClient(adminPermissionsClient); + PolicyStore policyStore = storeFactory.getPolicyStore(); + + return policyStore.findDependentPolicies(resourceServer, resourceType.getType(), UserPolicyProviderFactory.ID, "users", subject.getId()); + } + + @Override + public boolean evaluate(KeycloakSession session, Policy policy, UserModel adminUser) { + return policy.getConfig().getOrDefault("users", "").contains(adminUser.getId()); + } + + @Override + public boolean supports(Policy policy) { + return UserPolicyProviderFactory.ID.equals(policy.getType()); } @Override diff --git a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/user/UserPolicyProviderFactory.java b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/user/UserPolicyProviderFactory.java index 103587c568c6..42b807d33608 100644 --- a/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/user/UserPolicyProviderFactory.java +++ b/authz/policy/common/src/main/java/org/keycloak/authorization/policy/provider/user/UserPolicyProviderFactory.java @@ -46,6 +46,8 @@ */ public class UserPolicyProviderFactory implements PolicyProviderFactory { + public static final String ID = "user"; + private UserPolicyProvider provider = new UserPolicyProvider(this::toRepresentation); @Override @@ -65,7 +67,7 @@ public PolicyProvider create(AuthorizationProvider authorization) { @Override public PolicyProvider create(KeycloakSession session) { - return null; + return provider; } @Override @@ -193,6 +195,6 @@ public void close() { @Override public String getId() { - return "user"; + return ID; } } diff --git a/boms/pom.xml b/boms/pom.xml index 4d105b60615e..5a54a03310e9 100644 --- a/boms/pom.xml +++ b/boms/pom.xml @@ -45,14 +45,21 @@ - https://s01.oss.sonatype.org/ - jboss-releases-repository - keycloak - https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/ - jboss-snapshots-repository - https://s01.oss.sonatype.org/content/repositories/snapshots/ + 0.7.0 + 1.0.7 + + + keycloak-publish + https://central.sonatype.com/ + + + keycloak-publish + https://central.sonatype.com/repository/maven-snapshots/ + + + spi @@ -105,17 +112,28 @@ + - nexus-staging + central-staging - org.sonatype.plugins - nexus-staging-maven-plugin + org.sonatype.central + central-publishing-maven-plugin + ${central.publishing.plugin.version} + true + + true + all + keycloak-${project.version} + keycloak-publish + published + + nexus3-staging @@ -123,6 +141,7 @@ org.sonatype.plugins nxrm3-maven-plugin + ${nexus3.staging.plugin.version} true ${jboss.releases.repo.id} diff --git a/common/src/main/java/org/keycloak/common/ClientConnection.java b/common/src/main/java/org/keycloak/common/ClientConnection.java index 3e6c124da8b6..909f14b86ee8 100755 --- a/common/src/main/java/org/keycloak/common/ClientConnection.java +++ 
b/common/src/main/java/org/keycloak/common/ClientConnection.java @@ -26,9 +26,12 @@ public interface ClientConnection { /** - * @return the address as a string if it is available, otherwise null + * @return the IP address as a string if it is available, otherwise null */ String getRemoteAddr(); + /** + * @return the remote host, which will be an IP address or whatever is provided via proxy headers + */ String getRemoteHost(); int getRemotePort(); diff --git a/common/src/main/java/org/keycloak/common/Profile.java b/common/src/main/java/org/keycloak/common/Profile.java index 09e0ca6866c0..4eb1a6fa7310 100755 --- a/common/src/main/java/org/keycloak/common/Profile.java +++ b/common/src/main/java/org/keycloak/common/Profile.java @@ -55,7 +55,7 @@ public enum Feature { ADMIN_FINE_GRAINED_AUTHZ("Fine-Grained Admin Permissions", Type.PREVIEW, 1), - ADMIN_FINE_GRAINED_AUTHZ_V2("Fine-Grained Admin Permissions version 2", Type.EXPERIMENTAL, 2, Feature.AUTHORIZATION), + ADMIN_FINE_GRAINED_AUTHZ_V2("Fine-Grained Admin Permissions version 2", Type.DEFAULT, 2, Feature.AUTHORIZATION), ADMIN_API("Admin API", Type.DEFAULT), @@ -73,7 +73,9 @@ public enum Feature { SCRIPTS("Write custom authenticators using JavaScript", Type.PREVIEW), - TOKEN_EXCHANGE("Token Exchange Service", Type.PREVIEW), + TOKEN_EXCHANGE("Token Exchange Service", Type.PREVIEW, 1), + TOKEN_EXCHANGE_STANDARD_V2("Standard Token Exchange version 2", Type.DEFAULT, 2), + TOKEN_EXCHANGE_EXTERNAL_INTERNAL_V2("External to Internal Token Exchange version 2", Type.EXPERIMENTAL, 2), WEB_AUTHN("W3C Web Authentication (WebAuthn)", Type.DEFAULT), @@ -92,9 +94,9 @@ public enum Feature { // Check if kerberos is available in underlying JVM and auto-detect if feature should be enabled or disabled by default based on that KERBEROS("Kerberos", Type.DEFAULT, 1, () -> KerberosJdkProvider.getProvider().isKerberosAvailable()), - RECOVERY_CODES("Recovery codes", Type.PREVIEW), + RECOVERY_CODES("Recovery codes", Type.DEFAULT), - UPDATE_EMAIL("Update Email Action", Type.PREVIEW), + UPDATE_EMAIL("Update Email Action", Type.DEFAULT), FIPS("FIPS 140-2 mode", Type.DISABLED_BY_DEFAULT), @@ -122,14 +124,22 @@ public enum Feature { ORGANIZATION("Organization support within realms", Type.DEFAULT), - PASSKEYS("Passkeys", Type.PREVIEW), + PASSKEYS("Passkeys", Type.PREVIEW, Feature.WEB_AUTHN), - CACHE_EMBEDDED_REMOTE_STORE("Support for remote-store in embedded Infinispan caches", Type.EXPERIMENTAL), + USER_EVENT_METRICS("Collect metrics based on user events", Type.DEFAULT), - USER_EVENT_METRICS("Collect metrics based on user events", Type.PREVIEW), + IPA_TUURA_FEDERATION("IPA-Tuura user federation provider", Type.EXPERIMENTAL), - IPA_TUURA_FEDERATION("IPA-Tuura user federation provider", Type.EXPERIMENTAL) - ; + LOGOUT_ALL_SESSIONS_V1("Logout all sessions logs out only regular sessions", Type.DEPRECATED, 1), + + ROLLING_UPDATES_V1("Rolling Updates", Type.DEFAULT, 1), + ROLLING_UPDATES_V2("Rolling Updates for patch releases", Type.PREVIEW, 2), + + /** + * @see Deprecate for removal the Instagram social broker. 
+ */ + @Deprecated + INSTAGRAM_BROKER("Instagram Identity Broker", Type.DEPRECATED, 1); private final Type type; private final String label; @@ -404,6 +414,13 @@ public static boolean isFeatureEnabled(Feature feature) { return getInstance().features.get(feature); } + public static boolean isAnyVersionOfFeatureEnabled(Feature feature) { + return isFeatureEnabled(feature) || + getInstance().getEnabledFeatures() + .stream() + .anyMatch(f -> Objects.equals(f.getUnversionedKey(), feature.getUnversionedKey())); + } + public ProfileName getName() { return profileName; } @@ -416,6 +433,10 @@ public Set getDisabledFeatures() { return features.entrySet().stream().filter(e -> !e.getValue()).map(Map.Entry::getKey).collect(Collectors.toSet()); } + public Set getEnabledFeatures() { + return features.entrySet().stream().filter(Map.Entry::getValue).map(Map.Entry::getKey).collect(Collectors.toSet()); + } + /** * @return all features of type "preview" or "preview_disabled_by_default" */ diff --git a/common/src/main/java/org/keycloak/common/crypto/CryptoIntegration.java b/common/src/main/java/org/keycloak/common/crypto/CryptoIntegration.java index c12ea3e552e9..7f9c35bd6e44 100644 --- a/common/src/main/java/org/keycloak/common/crypto/CryptoIntegration.java +++ b/common/src/main/java/org/keycloak/common/crypto/CryptoIntegration.java @@ -94,7 +94,7 @@ public static String dumpSecurityProperties() { } public static void setProvider(CryptoProvider provider) { - logger.debugf("Using the crypto provider: %s", provider.getClass().getName()); + logger.debugf("Using the crypto provider: %s", provider != null ? provider.getClass().getName() : "null"); cryptoProvider = provider; } } diff --git a/common/src/main/java/org/keycloak/common/util/DerUtils.java b/common/src/main/java/org/keycloak/common/util/DerUtils.java index f00e547d5240..5652d5475534 100755 --- a/common/src/main/java/org/keycloak/common/util/DerUtils.java +++ b/common/src/main/java/org/keycloak/common/util/DerUtils.java @@ -52,10 +52,7 @@ public static PrivateKey decodePrivateKey(InputStream is) dis.readFully(keyBytes); dis.close(); - PKCS8EncodedKeySpec spec = - new PKCS8EncodedKeySpec(keyBytes); - KeyFactory kf =CryptoIntegration.getProvider().getKeyFactory("RSA"); - return kf.generatePrivate(spec); + return decodePrivateKey(keyBytes); } public static PublicKey decodePublicKey(byte[] der) throws NoSuchAlgorithmException, InvalidKeySpecException, NoSuchProviderException { @@ -79,7 +76,14 @@ public static X509Certificate decodeCertificate(InputStream is) throws Exception public static PrivateKey decodePrivateKey(byte[] der) throws NoSuchAlgorithmException, InvalidKeySpecException, NoSuchProviderException { PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(der); - KeyFactory kf = CryptoIntegration.getProvider().getKeyFactory("RSA"); - return kf.generatePrivate(spec); - } + String[] algorithms = { "RSA", "EC" }; + for (String algorithm : algorithms) { + try { + return CryptoIntegration.getProvider().getKeyFactory(algorithm).generatePrivate(spec); + } catch (InvalidKeySpecException e) { + // Ignore and try the next algorithm. 
+ } + } + throw new InvalidKeySpecException("Unable to decode the private key with supported algorithms: " + String.join(", ", algorithms)); + } } diff --git a/common/src/main/java/org/keycloak/common/util/KeycloakUriBuilder.java b/common/src/main/java/org/keycloak/common/util/KeycloakUriBuilder.java index b9e56ea0189c..2591d6879c74 100755 --- a/common/src/main/java/org/keycloak/common/util/KeycloakUriBuilder.java +++ b/common/src/main/java/org/keycloak/common/util/KeycloakUriBuilder.java @@ -80,7 +80,6 @@ public KeycloakUriBuilder clone() { } private static final Pattern opaqueUri = Pattern.compile("^([^:/?#]+):([^/].*)"); - private static final Pattern hierarchicalUri = Pattern.compile("^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\\?([^#]*))?(#(.*))?"); private static final Pattern hostPortPattern = Pattern.compile("([^/:]+):(\\d+)"); public static boolean compare(String s1, String s2) { @@ -139,13 +138,41 @@ public KeycloakUriBuilder uriTemplate(String uriTemplate) { return uri(uriTemplate, true); } - protected KeycloakUriBuilder parseHierarchicalUri(String uri, Matcher match, boolean template) { - boolean scheme = match.group(2) != null; - if (scheme) this.scheme = match.group(2); - String authority = match.group(4); + private String matchesHierarchicalUriPart(Map map, String s, String regex, String part) { + if (!s.isEmpty()) { + Matcher m = Pattern.compile(regex).matcher(s); + if (m.find()) { + map.put(part, m.group(1)); + return s.substring(m.end()); + } + } + return s; + } + + private Map matchesHierarchicalUri(final String uri) { + // hierarchicalUri regex: ^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\\?([^#]*))?(#(.*))? + Map result = new HashMap<>(); + // scheme + String s = matchesHierarchicalUriPart(result, uri, "^([^:/?#]+):", "scheme"); + // authority + s = matchesHierarchicalUriPart(result, s, "^//([^/?#]*)", "authority"); + // path + s = matchesHierarchicalUriPart(result, s, "^([^?#]*)", "path"); + // query + s = matchesHierarchicalUriPart(result, s, "^\\?([^#]*)", "query"); + // fragment + s = matchesHierarchicalUriPart(result, s, "^#(.*)", "fragment"); + // if the uri is parsed completely it is a valid uri + return s.isEmpty() ? 
result : null; + } + + protected KeycloakUriBuilder parseHierarchicalUri(String uri, Map match, boolean template) { + boolean scheme = match.get("scheme") != null; + if (scheme) this.scheme = match.get("scheme"); + String authority = match.get("authority"); if (authority != null) { this.authority = null; - String host = match.group(4); + String host = authority; int at = host.indexOf('@'); if (at > -1) { String user = host.substring(0, at); @@ -164,14 +191,14 @@ protected KeycloakUriBuilder parseHierarchicalUri(String uri, Matcher match, boo this.host = host; } } - if (match.group(5) != null) { - String group = match.group(5); + if (match.get("path") != null) { + String group = match.get("path"); if (!scheme && !"".equals(group) && !group.startsWith("/") && group.indexOf(':') > -1) throw new IllegalArgumentException("Illegal uri template: " + uri); if (!"".equals(group)) replacePath(group, template); } - if (match.group(7) != null) replaceQuery(match.group(7), template); - if (match.group(9) != null) fragment(match.group(9), template); + if (match.get("query") != null) replaceQuery(match.get("query"), template); + if (match.get("fragment") != null) fragment(match.get("fragment"), template); return this; } @@ -193,8 +220,8 @@ public KeycloakUriBuilder uri(String uri, boolean template) throws IllegalArgume this.ssp = opaque.group(2); return this; } else { - Matcher match = hierarchicalUri.matcher(uri); - if (match.matches()) { + Map match = matchesHierarchicalUri(uri); + if (match != null) { ssp = null; return parseHierarchicalUri(uri, match, template); } diff --git a/common/src/main/java/org/keycloak/common/util/NetworkUtils.java b/common/src/main/java/org/keycloak/common/util/NetworkUtils.java index 5889556a934c..5aaed8cba7d6 100644 --- a/common/src/main/java/org/keycloak/common/util/NetworkUtils.java +++ b/common/src/main/java/org/keycloak/common/util/NetworkUtils.java @@ -417,7 +417,7 @@ private static boolean checkForSolaris() { return checkForPresence("os.name", "sun"); } - private static boolean checkForWindows() { + public static boolean checkForWindows() { return checkForPresence("os.name", "win"); } diff --git a/common/src/main/java/org/keycloak/common/util/PathHelper.java b/common/src/main/java/org/keycloak/common/util/PathHelper.java index 4e5949d75a0e..1bcffbd9d951 100755 --- a/common/src/main/java/org/keycloak/common/util/PathHelper.java +++ b/common/src/main/java/org/keycloak/common/util/PathHelper.java @@ -40,7 +40,7 @@ public class PathHelper /** * A regex pattern that searches for a URI template parameter in the form of {*} */ - public static final Pattern URI_TEMPLATE_PATTERN = Pattern.compile("(\\{([^}]+)\\})"); + public static final Pattern URI_TEMPLATE_PATTERN = Pattern.compile("(\\{([^{}]+)\\})"); public static final char openCurlyReplacement = 6; public static final char closeCurlyReplacement = 7; diff --git a/common/src/main/java/org/keycloak/common/util/PemUtils.java b/common/src/main/java/org/keycloak/common/util/PemUtils.java index 483e7a1d0ecd..fca4be634f09 100755 --- a/common/src/main/java/org/keycloak/common/util/PemUtils.java +++ b/common/src/main/java/org/keycloak/common/util/PemUtils.java @@ -23,6 +23,9 @@ import java.security.PublicKey; import java.security.cert.Certificate; import java.security.cert.X509Certificate; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; import org.keycloak.common.crypto.CryptoIntegration; @@ -53,6 +56,20 @@ public static X509Certificate decodeCertificate(String cert) { return 
CryptoIntegration.getProvider().getPemUtils().decodeCertificate(cert); } + /** + * Decode one or more X509 Certificates from a PEM string (certificate bundle) + * + * @param certs + * @return + * @throws Exception + */ + public static X509Certificate[] decodeCertificates(String certs) { + return Arrays.stream(certs.split(END_CERT)) + .map(String::trim) + .filter(pemBlock -> !pemBlock.isEmpty()) + .map(pemBlock -> PemUtils.decodeCertificate(pemBlock + END_CERT)) + .toArray(X509Certificate[]::new); + } /** * Decode a Public Key from a PEM string diff --git a/common/src/main/java/org/keycloak/common/util/SecretGenerator.java b/common/src/main/java/org/keycloak/common/util/SecretGenerator.java index ff73e855eeec..20d8ad82cc93 100644 --- a/common/src/main/java/org/keycloak/common/util/SecretGenerator.java +++ b/common/src/main/java/org/keycloak/common/util/SecretGenerator.java @@ -30,6 +30,15 @@ private SecretGenerator() { public static SecretGenerator getInstance() { return instance; } + + public String generateSecureID() { + StringBuilder builder = new StringBuilder(instance.randomBytesHex(16)); + builder.insert(8, '-'); + builder.insert(13, '-'); + builder.insert(18, '-'); + builder.insert(23, '-'); + return builder.toString(); + } public String randomString() { return randomString(SECRET_LENGTH_256_BITS, ALPHANUM); @@ -56,6 +65,7 @@ public String randomString(int length, char[] symbols) { return new String(buf); } + public byte[] randomBytes() { return randomBytes(SECRET_LENGTH_256_BITS); } @@ -70,4 +80,37 @@ public byte[] randomBytes(int length) { return buf; } + public String randomBytesHex(int length) { + final StringBuilder sb = new StringBuilder(); + for (byte b : randomBytes(length)) { + sb.append(Character.forDigit((b >> 4) & 0xF, 16)); + sb.append(Character.forDigit((b & 0xF), 16)); + } + return sb.toString(); + } + + /** + * Returns the equivalent length for a destination alphabet to have the same + * entropy bits as a randomly generated byte array. + * + * @param byteLengthEntropy The desired entropy in bytes + * @param dstAlphabetLeng The length of the destination alphabet + * @return The equivalent length in destination alphabet to have the same entropy bits + */ + public static int equivalentEntropySize(int byteLengthEntropy, int dstAlphabetLeng) { + return equivalentEntropySize(byteLengthEntropy, 256, dstAlphabetLeng); + } + + /** + * Returns the equivalent length for a destination alphabet to have the same + * entropy bits as another source alphabet. 
+ * + * @param length The length of the string encoded in source alphabet + * @param srcAlphabetLength The length of the source alphabet + * @param dstAlphabetLeng The length of the destination alphabet + * @return The equivalent length (same entropy) in destination alphabet for a string of length in source alphabet + */ + public static int equivalentEntropySize(int length, int srcAlphabetLength, int dstAlphabetLeng) { + return (int) Math.ceil(length * ((Math.log(srcAlphabetLength)) / (Math.log(dstAlphabetLeng)))); + } } diff --git a/common/src/test/java/org/keycloak/common/ProfileTest.java b/common/src/test/java/org/keycloak/common/ProfileTest.java index a45584a4d5db..d68b4a2d7f0c 100644 --- a/common/src/test/java/org/keycloak/common/ProfileTest.java +++ b/common/src/test/java/org/keycloak/common/ProfileTest.java @@ -27,7 +27,7 @@ public class ProfileTest { - private static final Profile.Feature DEFAULT_FEATURE = Profile.Feature.AUTHORIZATION; + private static final Profile.Feature DEFAULT_FEATURE = Profile.Feature.CLIENT_POLICIES; private static final Profile.Feature DISABLED_BY_DEFAULT_FEATURE = Profile.Feature.DOCKER; private static final Profile.Feature PREVIEW_FEATURE = Profile.Feature.TOKEN_EXCHANGE; private static final Profile.Feature EXPERIMENTAL_FEATURE = Profile.Feature.DYNAMIC_SCOPES; diff --git a/common/src/test/java/org/keycloak/common/crypto/CryptoIntegrationTest.java b/common/src/test/java/org/keycloak/common/crypto/CryptoIntegrationTest.java new file mode 100644 index 000000000000..170ae1ce3294 --- /dev/null +++ b/common/src/test/java/org/keycloak/common/crypto/CryptoIntegrationTest.java @@ -0,0 +1,36 @@ +package org.keycloak.common.crypto; + +import static org.junit.Assert.assertNull; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class CryptoIntegrationTest { + private static CryptoProvider originalProvider; + + @BeforeClass + public static void keepOriginalProvider() { + CryptoIntegrationTest.originalProvider = getSelectedProvider(); + } + + // doing our best to avoid any side effects on other tests by restoring the initial state of CryptoIntegration + @AfterClass + public static void restoreOriginalProvider() { + CryptoIntegration.setProvider(originalProvider); + } + + @Test + public void canSetNullProvider() { + CryptoIntegration.setProvider(null); + assertNull(getSelectedProvider()); + } + + private static CryptoProvider getSelectedProvider() { + try { + return CryptoIntegration.getProvider(); + } catch (IllegalStateException e) { + return null; + } + } +} diff --git a/core/src/main/java/org/keycloak/AbstractOAuthClient.java b/core/src/main/java/org/keycloak/AbstractOAuthClient.java index 40ada8a3ee6c..ced1f3552c2a 100644 --- a/core/src/main/java/org/keycloak/AbstractOAuthClient.java +++ b/core/src/main/java/org/keycloak/AbstractOAuthClient.java @@ -19,9 +19,9 @@ import org.keycloak.common.enums.RelativeUrlsUsed; import org.keycloak.common.util.KeycloakUriBuilder; +import org.keycloak.common.util.SecretGenerator; import java.util.Map; -import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; /** @@ -43,7 +43,7 @@ public class AbstractOAuthClient { protected boolean isSecure; protected boolean publicClient; protected String getStateCode() { - return counter.getAndIncrement() + "/" + UUID.randomUUID().toString(); + return counter.getAndIncrement() + "/" + SecretGenerator.getInstance().generateSecureID(); } public String getClientId() { diff --git a/core/src/main/java/org/keycloak/Config.java 
b/core/src/main/java/org/keycloak/Config.java index c7e38fa4b320..538f571a85cb 100755 --- a/core/src/main/java/org/keycloak/Config.java +++ b/core/src/main/java/org/keycloak/Config.java @@ -235,6 +235,13 @@ public static interface Scope { Scope scope(String... scope); + /** + * @deprecated since 26.3.0, to be removed + * + *
Was introduced for testing purposes and was not fully / correctly implemented + * across Scope implementations + */ + @Deprecated Set getPropertyNames(); } } diff --git a/core/src/main/java/org/keycloak/TokenIdGenerator.java b/core/src/main/java/org/keycloak/TokenIdGenerator.java index fa085363a966..4320a7f69756 100755 --- a/core/src/main/java/org/keycloak/TokenIdGenerator.java +++ b/core/src/main/java/org/keycloak/TokenIdGenerator.java @@ -17,7 +17,8 @@ package org.keycloak; -import java.util.UUID; +import org.keycloak.common.util.SecretGenerator; + import java.util.concurrent.atomic.AtomicLong; /** @@ -28,6 +29,6 @@ public class TokenIdGenerator { private static final AtomicLong counter = new AtomicLong(); public static String generateId() { - return UUID.randomUUID().toString() + "-" + System.currentTimeMillis(); + return SecretGenerator.getInstance().generateSecureID() + "-" + System.currentTimeMillis(); } } diff --git a/core/src/main/java/org/keycloak/TokenVerifier.java b/core/src/main/java/org/keycloak/TokenVerifier.java index 70112ef89bc7..ac402d19ee08 100755 --- a/core/src/main/java/org/keycloak/TokenVerifier.java +++ b/core/src/main/java/org/keycloak/TokenVerifier.java @@ -120,7 +120,7 @@ public boolean test(JsonWebToken t) throws VerificationException { public static class TokenTypeCheck implements Predicate { - private static final TokenTypeCheck INSTANCE_DEFAULT_TOKEN_TYPE = new TokenTypeCheck(Arrays.asList(TokenUtil.TOKEN_TYPE_BEARER, TokenUtil.TOKEN_TYPE_DPOP)); + private static final TokenTypeCheck INSTANCE_DEFAULT_TOKEN_TYPE = new TokenTypeCheck(Arrays.asList(TokenUtil.TOKEN_TYPE_BEARER)); private final List tokenTypes; diff --git a/core/src/main/java/org/keycloak/jose/jwk/JWKParser.java b/core/src/main/java/org/keycloak/jose/jwk/JWKParser.java index 9cbb0a643a85..e1c9bd651279 100644 --- a/core/src/main/java/org/keycloak/jose/jwk/JWKParser.java +++ b/core/src/main/java/org/keycloak/jose/jwk/JWKParser.java @@ -25,6 +25,7 @@ import java.security.spec.ECPublicKeySpec; import java.security.spec.RSAPublicKeySpec; +import com.fasterxml.jackson.databind.JsonNode; import org.keycloak.common.crypto.CryptoIntegration; import org.keycloak.common.util.Base64Url; import org.keycloak.crypto.KeyType; @@ -70,10 +71,14 @@ public PublicKey toPublicKey() { throw new IllegalStateException("Not possible to convert to the publicKey. 
The jwk is not set"); } String keyType = jwk.getKeyType(); + + // subtypes may store properties differently while representing the same JWK, serializing it to nodes + // makes sure there is no difference when creating the keys + JsonNode normalizedJwkNode = JsonSerialization.writeValueAsNode(jwk); if (KeyType.RSA.equals(keyType)) { - return createRSAPublicKey(); + return createRSAPublicKey(normalizedJwkNode); } else if (KeyType.EC.equals(keyType)) { - return createECPublicKey(); + return createECPublicKey(normalizedJwkNode); } else if (KeyType.OKP.equals(keyType)) { return JWKBuilder.EdEC_UTILS.createOKPPublicKey(jwk); } else { @@ -81,16 +86,13 @@ public PublicKey toPublicKey() { } } - protected PublicKey createECPublicKey() { - /* Check if jwk.getOtherClaims return an empty map */ - if (jwk.getOtherClaims().size() == 0) { - throw new RuntimeException("JWK Otherclaims map is empty."); - } + private static PublicKey createECPublicKey(JsonNode jwk) { + /* Try retrieving the necessary fields */ - String crv = (String) jwk.getOtherClaims().get(ECPublicJWK.CRV); - String xStr = (String) jwk.getOtherClaims().get(ECPublicJWK.X); - String yStr = (String) jwk.getOtherClaims().get(ECPublicJWK.Y); + String crv = jwk.path(ECPublicJWK.CRV).asText(null); + String xStr = jwk.get(ECPublicJWK.X).asText(null); + String yStr = jwk.get(ECPublicJWK.Y).asText(null); /* Check if the retrieving of necessary fields success */ if (crv == null || xStr == null || yStr == null) { @@ -128,9 +130,9 @@ protected PublicKey createECPublicKey() { } } - protected PublicKey createRSAPublicKey() { - BigInteger modulus = new BigInteger(1, Base64Url.decode(jwk.getOtherClaims().get(RSAPublicJWK.MODULUS).toString())); - BigInteger publicExponent = new BigInteger(1, Base64Url.decode(jwk.getOtherClaims().get(RSAPublicJWK.PUBLIC_EXPONENT).toString())); + private static PublicKey createRSAPublicKey(JsonNode jwk) { + BigInteger modulus = new BigInteger(1, Base64Url.decode(jwk.path(RSAPublicJWK.MODULUS).asText(null))); + BigInteger publicExponent = new BigInteger(1, Base64Url.decode(jwk.path(RSAPublicJWK.PUBLIC_EXPONENT).asText(null))); try { KeyFactory kf = KeyFactory.getInstance("RSA"); diff --git a/core/src/main/java/org/keycloak/protocol/oidc/client/authentication/JWTClientCredentialsProvider.java b/core/src/main/java/org/keycloak/protocol/oidc/client/authentication/JWTClientCredentialsProvider.java index 5d268be8acb7..759842284b23 100644 --- a/core/src/main/java/org/keycloak/protocol/oidc/client/authentication/JWTClientCredentialsProvider.java +++ b/core/src/main/java/org/keycloak/protocol/oidc/client/authentication/JWTClientCredentialsProvider.java @@ -20,11 +20,11 @@ import java.security.KeyPair; import java.security.PublicKey; import java.util.Map; -import java.util.UUID; import org.keycloak.OAuth2Constants; import org.keycloak.common.util.KeyUtils; import org.keycloak.common.util.KeystoreUtil; +import org.keycloak.common.util.SecretGenerator; import org.keycloak.common.util.Time; import org.keycloak.crypto.Algorithm; import org.keycloak.crypto.AsymmetricSignatureSignerContext; @@ -177,7 +177,7 @@ public String createSignedRequestToken(String clientId, String realmInfoUrl) { protected JsonWebToken createRequestToken(String clientId, String realmInfoUrl) { JsonWebToken reqToken = new JsonWebToken(); - reqToken.id(UUID.randomUUID().toString()); + reqToken.id(SecretGenerator.getInstance().generateSecureID()); reqToken.issuer(clientId); reqToken.subject(clientId); reqToken.audience(realmInfoUrl); diff --git 
a/core/src/main/java/org/keycloak/protocol/oidc/client/authentication/JWTClientSecretCredentialsProvider.java b/core/src/main/java/org/keycloak/protocol/oidc/client/authentication/JWTClientSecretCredentialsProvider.java index a53a6b543bf8..210cb4be5ae1 100644 --- a/core/src/main/java/org/keycloak/protocol/oidc/client/authentication/JWTClientSecretCredentialsProvider.java +++ b/core/src/main/java/org/keycloak/protocol/oidc/client/authentication/JWTClientSecretCredentialsProvider.java @@ -18,13 +18,13 @@ import java.nio.charset.StandardCharsets; import java.util.Map; -import java.util.UUID; import javax.crypto.SecretKey; import javax.crypto.spec.SecretKeySpec; import org.jboss.logging.Logger; import org.keycloak.OAuth2Constants; +import org.keycloak.common.util.SecretGenerator; import org.keycloak.common.util.Time; import org.keycloak.crypto.Algorithm; import org.keycloak.crypto.JavaAlgorithm; @@ -126,7 +126,7 @@ protected JsonWebToken createRequestToken(String clientId, String realmInfoUrl) // JWT claims is the same as one by private_key_jwt JsonWebToken reqToken = new JsonWebToken(); - reqToken.id(UUID.randomUUID().toString()); + reqToken.id(SecretGenerator.getInstance().generateSecureID()); reqToken.issuer(clientId); reqToken.subject(clientId); reqToken.audience(realmInfoUrl); diff --git a/core/src/main/java/org/keycloak/representations/JsonWebToken.java b/core/src/main/java/org/keycloak/representations/JsonWebToken.java index a969273fde24..ce1a98fb1e76 100755 --- a/core/src/main/java/org/keycloak/representations/JsonWebToken.java +++ b/core/src/main/java/org/keycloak/representations/JsonWebToken.java @@ -41,6 +41,7 @@ */ public class JsonWebToken implements Serializable, Token { public static final String AZP = "azp"; + public static final String AUD = "aud"; public static final String SUBJECT = "sub"; @JsonProperty("jti") @@ -52,7 +53,7 @@ public class JsonWebToken implements Serializable, Token { @JsonProperty("iss") protected String issuer; - @JsonProperty("aud") + @JsonProperty(AUD) @JsonSerialize(using = StringOrArraySerializer.class) @JsonDeserialize(using = StringOrArrayDeserializer.class) protected String[] audience; diff --git a/core/src/main/java/org/keycloak/representations/KeyStoreConfig.java b/core/src/main/java/org/keycloak/representations/KeyStoreConfig.java index 782669472c33..0364417eb5b8 100644 --- a/core/src/main/java/org/keycloak/representations/KeyStoreConfig.java +++ b/core/src/main/java/org/keycloak/representations/KeyStoreConfig.java @@ -29,6 +29,8 @@ public class KeyStoreConfig { protected String keyAlias; protected String realmAlias; protected String format; + protected Integer keySize; + protected Integer validity; public Boolean isRealmCertificate() { return realmCertificate; @@ -77,4 +79,20 @@ public String getFormat() { public void setFormat(String format) { this.format = format; } + + public Integer getKeySize() { + return keySize; + } + + public void setKeySize(Integer keySize) { + this.keySize = keySize; + } + + public Integer getValidity() { + return validity; + } + + public void setValidity(Integer validity) { + this.validity = validity; + } } diff --git a/core/src/main/java/org/keycloak/representations/account/ConsentScopeRepresentation.java b/core/src/main/java/org/keycloak/representations/account/ConsentScopeRepresentation.java index a4b6400a1641..31eb0eb60b93 100644 --- a/core/src/main/java/org/keycloak/representations/account/ConsentScopeRepresentation.java +++ 
b/core/src/main/java/org/keycloak/representations/account/ConsentScopeRepresentation.java @@ -23,15 +23,15 @@ public class ConsentScopeRepresentation { private String name; - private String displayTest; + private String displayText; public ConsentScopeRepresentation() { } - public ConsentScopeRepresentation(String id, String name, String displayTest) { + public ConsentScopeRepresentation(String id, String name, String displayText) { this.id = id; this.name = name; - this.displayTest = displayTest; + this.displayText = displayText; } public String getId() { @@ -50,11 +50,27 @@ public void setName(String name) { this.name = name; } + public String getDisplayText() { + return displayText; + } + + public void setDisplayText(String displayText) { + this.displayText = displayText; + } + + /** + * @deprecated Use {@link #getDisplayText()} instead. This method will be removed in KC 27.0. + */ + @Deprecated public String getDisplayTest() { - return displayTest; + return displayText; } + /** + * @deprecated Use {@link #setDisplayText(String)} instead. This method will be removed in KC 27.0. + */ + @Deprecated public void setDisplayTest(String displayTest) { - this.displayTest = displayTest; + this.displayText = displayTest; } } diff --git a/core/src/main/java/org/keycloak/representations/account/CredentialMetadataRepresentation.java b/core/src/main/java/org/keycloak/representations/account/CredentialMetadataRepresentation.java index da42b416bc02..594ab0d64a0b 100644 --- a/core/src/main/java/org/keycloak/representations/account/CredentialMetadataRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/account/CredentialMetadataRepresentation.java @@ -4,9 +4,9 @@ public class CredentialMetadataRepresentation { - String infoMessage; - String warningMessageTitle; - String warningMessageDescription; + LocalizedMessage infoMessage; + LocalizedMessage warningMessageTitle; + LocalizedMessage warningMessageDescription; private CredentialRepresentation credential; @@ -19,27 +19,27 @@ public void setCredential(CredentialRepresentation credential) { this.credential = credential; } - public String getInfoMessage() { + public LocalizedMessage getInfoMessage() { return infoMessage; } - public void setInfoMessage(String infoMessage) { + public void setInfoMessage(LocalizedMessage infoMessage) { this.infoMessage = infoMessage; } - public String getWarningMessageTitle() { + public LocalizedMessage getWarningMessageTitle() { return warningMessageTitle; } - public void setWarningMessageTitle(String warningMessageTitle) { + public void setWarningMessageTitle(LocalizedMessage warningMessageTitle) { this.warningMessageTitle = warningMessageTitle; } - public String getWarningMessageDescription() { + public LocalizedMessage getWarningMessageDescription() { return warningMessageDescription; } - public void setWarningMessageDescription(String warningMessageDescription) { + public void setWarningMessageDescription(LocalizedMessage warningMessageDescription) { this.warningMessageDescription = warningMessageDescription; } } diff --git a/core/src/main/java/org/keycloak/representations/account/DeviceRepresentation.java b/core/src/main/java/org/keycloak/representations/account/DeviceRepresentation.java index 9ec8b370d496..903cdba7205e 100644 --- a/core/src/main/java/org/keycloak/representations/account/DeviceRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/account/DeviceRepresentation.java @@ -57,6 +57,11 @@ public void setId(String id) { this.id = id; } + /** + * Note: will not be an 
address when a proxy does not provide a valid one + * + * @return the ip address + */ public String getIpAddress() { return ipAddress; } diff --git a/core/src/main/java/org/keycloak/representations/account/LocalizedMessage.java b/core/src/main/java/org/keycloak/representations/account/LocalizedMessage.java new file mode 100644 index 000000000000..4f95a54e8855 --- /dev/null +++ b/core/src/main/java/org/keycloak/representations/account/LocalizedMessage.java @@ -0,0 +1,44 @@ +/* + * Copyright 2025 Red Hat, Inc. and/or its affiliates + * and other contributors as indicated by the @author tags. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.keycloak.representations.account; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * + * @author rmartinc + */ +public class LocalizedMessage { + + private final String key; + private final String[] parameters; + + @JsonCreator + public LocalizedMessage(@JsonProperty("key") String key, @JsonProperty("parameters") String... parameters) { + this.key = key; + this.parameters = parameters == null || parameters.length == 0? null : parameters; + } + + public String getKey() { + return key; + } + + public String[] getParameters() { + return parameters; + } +} diff --git a/core/src/main/java/org/keycloak/representations/account/SessionRepresentation.java b/core/src/main/java/org/keycloak/representations/account/SessionRepresentation.java index 996c85226c74..4b86319d70f6 100644 --- a/core/src/main/java/org/keycloak/representations/account/SessionRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/account/SessionRepresentation.java @@ -24,6 +24,11 @@ public void setId(String id) { this.id = id; } + /** + * Note: will not be an address when a proxy does not provide a valid one + * + * @return the ip address + */ public String getIpAddress() { return ipAddress; } diff --git a/core/src/main/java/org/keycloak/representations/idm/AbstractUserRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/AbstractUserRepresentation.java index c4fe9cb7a761..b17c72f360b4 100644 --- a/core/src/main/java/org/keycloak/representations/idm/AbstractUserRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/AbstractUserRepresentation.java @@ -45,6 +45,7 @@ public abstract class AbstractUserRepresentation { @JsonDeserialize(using = StringListMapDeserializer.class) protected Map> attributes; private UserProfileMetadata userProfileMetadata; + protected Boolean enabled; public String getId() { @@ -154,4 +155,12 @@ public void setUserProfileMetadata(UserProfileMetadata userProfileMetadata) { public UserProfileMetadata getUserProfileMetadata() { return userProfileMetadata; } + + public Boolean isEnabled() { + return enabled; + } + + public void setEnabled(Boolean enabled) { + this.enabled = enabled; + } } diff --git a/core/src/main/java/org/keycloak/representations/idm/AdminEventRepresentation.java 
b/core/src/main/java/org/keycloak/representations/idm/AdminEventRepresentation.java index d6687e8f321c..f015762288b4 100644 --- a/core/src/main/java/org/keycloak/representations/idm/AdminEventRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/AdminEventRepresentation.java @@ -24,6 +24,7 @@ */ public class AdminEventRepresentation { + private String id; private long time; private String realmId; private AuthDetailsRepresentation authDetails; @@ -34,6 +35,14 @@ public class AdminEventRepresentation { private String error; private Map details; + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + public long getTime() { return time; } diff --git a/core/src/main/java/org/keycloak/representations/idm/AuthDetailsRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/AuthDetailsRepresentation.java index 746aa718de8d..b65f5aa05a3e 100644 --- a/core/src/main/java/org/keycloak/representations/idm/AuthDetailsRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/AuthDetailsRepresentation.java @@ -52,6 +52,11 @@ public void setUserId(String userId) { this.userId = userId; } + /** + * Note: will not be an address when a proxy does not provide a valid one + * + * @return the ip address + */ public String getIpAddress() { return ipAddress; } diff --git a/core/src/main/java/org/keycloak/representations/idm/ClientPolicyConditionRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/ClientPolicyConditionRepresentation.java index 9e771581cf19..bc86bd022aa3 100644 --- a/core/src/main/java/org/keycloak/representations/idm/ClientPolicyConditionRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/ClientPolicyConditionRepresentation.java @@ -22,6 +22,8 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.JsonNode; +import org.eclipse.microprofile.openapi.annotations.enums.SchemaType; +import org.eclipse.microprofile.openapi.annotations.media.Schema; /** * @author Marek Posolda @@ -32,6 +34,9 @@ public class ClientPolicyConditionRepresentation { private String conditionProviderId; @JsonProperty("configuration") + @Schema(type= SchemaType.OBJECT, + description = "Configuration settings as a JSON object", + additionalProperties = Schema.True.class) private JsonNode configuration; public String getConditionProviderId() { diff --git a/core/src/main/java/org/keycloak/representations/idm/ClientPolicyExecutorRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/ClientPolicyExecutorRepresentation.java index a0c2a3fb4f5d..81dbe4887afc 100644 --- a/core/src/main/java/org/keycloak/representations/idm/ClientPolicyExecutorRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/ClientPolicyExecutorRepresentation.java @@ -22,6 +22,8 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.JsonNode; +import org.eclipse.microprofile.openapi.annotations.enums.SchemaType; +import org.eclipse.microprofile.openapi.annotations.media.Schema; /** * @author Marek Posolda @@ -32,6 +34,9 @@ public class ClientPolicyExecutorRepresentation { private String executorProviderId; @JsonProperty("configuration") + @Schema(type=SchemaType.OBJECT, + description = "Configuration settings as a JSON object", + additionalProperties = Schema.True.class) private JsonNode configuration; public String getExecutorProviderId() { diff --git 
a/core/src/main/java/org/keycloak/representations/idm/ComponentTypeRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/ComponentTypeRepresentation.java index 1662fa252223..b141d3d1f707 100644 --- a/core/src/main/java/org/keycloak/representations/idm/ComponentTypeRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/ComponentTypeRepresentation.java @@ -57,8 +57,9 @@ public void setProperties(List properties) { } /** - * Extra information about the component that might come from annotations or interfaces that the component implements - * For example, if UserStorageProvider implements ImportSynchronization + * Extra information about the component + * that might come from annotations or interfaces that the component implements. + * For example, if UserStorageProviderFactory implements ImportSynchronization * * @return */ diff --git a/core/src/main/java/org/keycloak/representations/idm/CredentialRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/CredentialRepresentation.java index 0917196da897..d75b44808295 100755 --- a/core/src/main/java/org/keycloak/representations/idm/CredentialRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/CredentialRepresentation.java @@ -62,6 +62,7 @@ public class CredentialRepresentation { private Integer period; @Deprecated private MultivaluedHashMap config; + private String federationLink; public String getId() { return id; @@ -246,5 +247,11 @@ public boolean equals(Object obj) { return true; } + public void setFederationLink(String federationLink) { + this.federationLink = federationLink; + } + public String getFederationLink() { + return federationLink; + } } diff --git a/core/src/main/java/org/keycloak/representations/idm/EventRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/EventRepresentation.java index 0621b9b040fd..b47337ba03e0 100644 --- a/core/src/main/java/org/keycloak/representations/idm/EventRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/EventRepresentation.java @@ -24,6 +24,7 @@ */ public class EventRepresentation { + private String id; private long time; private String type; private String realmId; @@ -34,6 +35,14 @@ public class EventRepresentation { private String error; private Map details; + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + public long getTime() { return time; } @@ -82,6 +91,11 @@ public void setSessionId(String sessionId) { this.sessionId = sessionId; } + /** + * Note: will not be an address when a proxy does not provide a valid one + * + * @return the ip address + */ public String getIpAddress() { return ipAddress; } @@ -128,6 +142,7 @@ public boolean equals(Object o) { @Override public int hashCode() { int result = (int) (time ^ (time >>> 32)); + result = 31 * result + (id != null ? id.hashCode() : 0); result = 31 * result + (type != null ? type.hashCode() : 0); result = 31 * result + (realmId != null ? realmId.hashCode() : 0); result = 31 * result + (clientId != null ? 
clientId.hashCode() : 0); diff --git a/core/src/main/java/org/keycloak/representations/idm/GroupRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/GroupRepresentation.java index d55129a9298b..f811a0b575c1 100755 --- a/core/src/main/java/org/keycloak/representations/idm/GroupRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/GroupRepresentation.java @@ -34,6 +34,7 @@ public class GroupRepresentation { // to identify a group and operate on it in a basic way protected String id; protected String name; + protected String description; protected String path; protected String parentId; protected Long subGroupCount; @@ -62,6 +63,14 @@ public void setName(String name) { this.name = name; } + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + public String getPath() { return path; } diff --git a/core/src/main/java/org/keycloak/representations/idm/RealmRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/RealmRepresentation.java index 6eaf4f498224..33df4f495b3b 100755 --- a/core/src/main/java/org/keycloak/representations/idm/RealmRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/RealmRepresentation.java @@ -156,6 +156,7 @@ public class RealmRepresentation { protected Boolean webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister; protected List webAuthnPolicyPasswordlessAcceptableAaguids; protected List webAuthnPolicyPasswordlessExtraOrigins; + protected Boolean webAuthnPolicyPasswordlessPasskeysEnabled; // Client Policies/Profiles @@ -1263,6 +1264,14 @@ public void setWebAuthnPolicyPasswordlessExtraOrigins(List extraOrigins) this.webAuthnPolicyPasswordlessExtraOrigins = extraOrigins; } + public Boolean getWebAuthnPolicyPasswordlessPasskeysEnabled(){ + return webAuthnPolicyPasswordlessPasskeysEnabled; + } + + public void setWebAuthnPolicyPasswordlessPasskeysEnabled(Boolean webAuthnPolicyPasswordlessPasskeysEnabled) { + this.webAuthnPolicyPasswordlessPasskeysEnabled = webAuthnPolicyPasswordlessPasskeysEnabled; + } + // Client Policies/Profiles @JsonIgnore diff --git a/core/src/main/java/org/keycloak/representations/idm/UserRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/UserRepresentation.java index 69c1ee06133c..889301b77fc7 100755 --- a/core/src/main/java/org/keycloak/representations/idm/UserRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/idm/UserRepresentation.java @@ -30,7 +30,6 @@ public class UserRepresentation extends AbstractUserRepresentation{ protected String self; // link protected String origin; protected Long createdTimestamp; - protected Boolean enabled; protected Boolean totp; protected String federationLink; protected String serviceAccountClientId; // For rep, it points to clientId (not DB ID) @@ -104,14 +103,6 @@ public void setCreatedTimestamp(Long createdTimestamp) { this.createdTimestamp = createdTimestamp; } - public Boolean isEnabled() { - return enabled; - } - - public void setEnabled(Boolean enabled) { - this.enabled = enabled; - } - @Deprecated public Boolean isTotp() { return totp; diff --git a/core/src/main/java/org/keycloak/representations/idm/UserSessionRepresentation.java b/core/src/main/java/org/keycloak/representations/idm/UserSessionRepresentation.java index 8bada557793a..a9f8679bd9fc 100755 --- a/core/src/main/java/org/keycloak/representations/idm/UserSessionRepresentation.java +++ 
b/core/src/main/java/org/keycloak/representations/idm/UserSessionRepresentation.java @@ -59,6 +59,11 @@ public void setUserId(String userId) { this.userId = userId; } + /** + * Note: will not be an address when a proxy does not provide a valid one + * + * @return the ip address + */ public String getIpAddress() { return ipAddress; } diff --git a/core/src/main/java/org/keycloak/representations/idm/authorization/PolicyEvaluationRequest.java b/core/src/main/java/org/keycloak/representations/idm/authorization/PolicyEvaluationRequest.java index d5663940a765..ee08df650cdb 100644 --- a/core/src/main/java/org/keycloak/representations/idm/authorization/PolicyEvaluationRequest.java +++ b/core/src/main/java/org/keycloak/representations/idm/authorization/PolicyEvaluationRequest.java @@ -30,6 +30,7 @@ public class PolicyEvaluationRequest { private Map> context = new HashMap<>(); private List resources = new LinkedList<>(); + private String resourceType; private String clientId; private String userId; private List roleIds = new LinkedList<>(); @@ -51,6 +52,14 @@ public void setResources(List resources) { this.resources = resources; } + public String getResourceType() { + return resourceType; + } + + public void setResourceType(String resourceType) { + this.resourceType = resourceType; + } + public String getClientId() { return this.clientId; } diff --git a/core/src/main/java/org/keycloak/representations/idm/authorization/PolicyEvaluationResponse.java b/core/src/main/java/org/keycloak/representations/idm/authorization/PolicyEvaluationResponse.java index bfa4c30d115b..b7bbd24c368c 100644 --- a/core/src/main/java/org/keycloak/representations/idm/authorization/PolicyEvaluationResponse.java +++ b/core/src/main/java/org/keycloak/representations/idm/authorization/PolicyEvaluationResponse.java @@ -71,9 +71,10 @@ public static class EvaluationResultRepresentation { private ResourceRepresentation resource; private List scopes; - private List policies; + private Set policies; private DecisionEffect status; - private List allowedScopes = new ArrayList<>(); + private Set allowedScopes = new HashSet<>(); + private Set deniedScopes = new HashSet<>(); public void setResource(final ResourceRepresentation resource) { this.resource = resource; @@ -91,11 +92,11 @@ public List getScopes() { return scopes; } - public void setPolicies(final List policies) { + public void setPolicies(final Set policies) { this.policies = policies; } - public List getPolicies() { + public Set getPolicies() { return policies; } @@ -107,13 +108,21 @@ public DecisionEffect getStatus() { return status; } - public void setAllowedScopes(List allowedScopes) { + public void setAllowedScopes(Set allowedScopes) { this.allowedScopes = allowedScopes; } - public List getAllowedScopes() { + public Set getAllowedScopes() { return allowedScopes; } + + public void setDeniedScopes(Set deniedScopes) { + this.deniedScopes = deniedScopes; + } + + public Set getDeniedScopes() { + return deniedScopes; + } } public static class PolicyResultRepresentation { @@ -122,6 +131,7 @@ public static class PolicyResultRepresentation { private DecisionEffect status; private List associatedPolicies; private Set scopes = new HashSet<>(); + private String resourceType; public PolicyRepresentation getPolicy() { return policy; @@ -149,7 +159,7 @@ public void setAssociatedPolicies(final List associa @Override public int hashCode() { - return this.policy.hashCode(); + return this.policy.getName().hashCode(); } @Override @@ -157,7 +167,7 @@ public boolean equals(Object o) { if (this == 
o) return true; if (o == null || getClass() != o.getClass()) return false; final PolicyResultRepresentation policy = (PolicyResultRepresentation) o; - return this.policy.equals(policy.getPolicy()); + return this.policy.getName().equals(policy.getPolicy().getName()); } public void setScopes(Set scopes) { @@ -167,5 +177,13 @@ public void setScopes(Set scopes) { public Set getScopes() { return scopes; } + + public void setResourceType(String resourceType) { + this.resourceType = resourceType; + } + + public String getResourceType() { + return resourceType; + } } } diff --git a/core/src/main/java/org/keycloak/representations/idm/authorization/ResourceType.java b/core/src/main/java/org/keycloak/representations/idm/authorization/ResourceType.java index 6c6b383fd084..ce48472fb22d 100644 --- a/core/src/main/java/org/keycloak/representations/idm/authorization/ResourceType.java +++ b/core/src/main/java/org/keycloak/representations/idm/authorization/ResourceType.java @@ -19,16 +19,30 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Collections; +import java.util.Map; import java.util.Set; public class ResourceType { + private final String type; private final Set scopes; + private final Map> scopeAliases; + private final String groupType; @JsonCreator public ResourceType(@JsonProperty("type") String type, @JsonProperty("scopes") Set scopes) { + this(type, scopes, Collections.emptyMap()); + } + + public ResourceType(String type, Set scopes, Map> scopeAliases) { + this(type, scopes, scopeAliases, null); + } + + public ResourceType(String type, Set scopes, Map> scopeAliases, String groupType) { this.type = type; this.scopes = Collections.unmodifiableSet(scopes); + this.scopeAliases = scopeAliases; + this.groupType = groupType; } public String getType() { @@ -38,4 +52,12 @@ public String getType() { public Set getScopes() { return Collections.unmodifiableSet(scopes); } + + public Map> getScopeAliases() { + return Collections.unmodifiableMap(scopeAliases); + } + + public String getGroupType() { + return groupType; + } } diff --git a/core/src/main/java/org/keycloak/representations/info/CpuInfoRepresentation.java b/core/src/main/java/org/keycloak/representations/info/CpuInfoRepresentation.java new file mode 100644 index 000000000000..7cd43fdd288d --- /dev/null +++ b/core/src/main/java/org/keycloak/representations/info/CpuInfoRepresentation.java @@ -0,0 +1,21 @@ +package org.keycloak.representations.info; + +public class CpuInfoRepresentation { + + protected long processorCount; + + public static CpuInfoRepresentation create() { + Runtime runtime = Runtime.getRuntime(); + CpuInfoRepresentation rep = new CpuInfoRepresentation(); + rep.setProcessorCount(runtime.availableProcessors()); + return rep; + } + + public long getProcessorCount() { + return processorCount; + } + + public void setProcessorCount(long processorCount) { + this.processorCount = processorCount; + } +} diff --git a/core/src/main/java/org/keycloak/representations/info/ServerInfoRepresentation.java b/core/src/main/java/org/keycloak/representations/info/ServerInfoRepresentation.java index adb152098dc8..a20bed5d73df 100755 --- a/core/src/main/java/org/keycloak/representations/info/ServerInfoRepresentation.java +++ b/core/src/main/java/org/keycloak/representations/info/ServerInfoRepresentation.java @@ -31,6 +31,7 @@ public class ServerInfoRepresentation { private SystemInfoRepresentation systemInfo; + private CpuInfoRepresentation cpuInfo; private MemoryInfoRepresentation 
memoryInfo; private ProfileInfoRepresentation profileInfo; @@ -71,6 +72,14 @@ public void setMemoryInfo(MemoryInfoRepresentation memoryInfo) { this.memoryInfo = memoryInfo; } + public CpuInfoRepresentation getCpuInfo() { + return cpuInfo; + } + + public void setCpuInfo(CpuInfoRepresentation cpuInfo) { + this.cpuInfo = cpuInfo; + } + public ProfileInfoRepresentation getProfileInfo() { return profileInfo; } diff --git a/core/src/main/java/org/keycloak/util/JWKSUtils.java b/core/src/main/java/org/keycloak/util/JWKSUtils.java index c66087a59006..b4e9d0652113 100644 --- a/core/src/main/java/org/keycloak/util/JWKSUtils.java +++ b/core/src/main/java/org/keycloak/util/JWKSUtils.java @@ -17,6 +17,7 @@ package org.keycloak.util; +import com.fasterxml.jackson.databind.JsonNode; import org.jboss.logging.Logger; import org.keycloak.crypto.KeyUse; import org.keycloak.crypto.KeyWrapper; @@ -33,6 +34,7 @@ import java.io.IOException; import java.security.PublicKey; +import java.security.interfaces.ECPublicKey; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -144,20 +146,30 @@ public static String computeThumbprint(JWK key) { } // TreeMap uses the natural ordering of the keys. - // Therefore, it follows the way of hash value calculation for a public key defined by RFC 7678 + // Therefore, it follows the way of hash value calculation for a public key defined by RFC 7638 public static String computeThumbprint(JWK key, String hashAlg) { - Map members = new TreeMap<>(); - members.put(JWK.KEY_TYPE, key.getKeyType()); + String kty = key.getKeyType(); + String[] requiredMembers = JWK_THUMBPRINT_REQUIRED_MEMBERS.get(kty); - for (String member : JWK_THUMBPRINT_REQUIRED_MEMBERS.get(key.getKeyType())) { - members.put(member, (String) key.getOtherClaims().get(member)); + // e.g. 
`oct`, see RFC 7638 Section 3.2 + if (requiredMembers == null) { + throw new UnsupportedOperationException("Unsupported key type: " + kty); } + Map members = new TreeMap<>(); + members.put(JWK.KEY_TYPE, kty); + try { + JsonNode node = JsonSerialization.writeValueAsNode(key); + for (String member : requiredMembers) { + members.put(member, node.get(member).asText()); + } + byte[] bytes = JsonSerialization.writeValueAsBytes(members); byte[] hash = HashUtils.hash(hashAlg, bytes); return Base64Url.encode(hash); } catch (IOException ex) { + logger.debugf(ex, "Failed to compute JWK thumbprint for key '%s'.", key.getKeyId()); return null; } } diff --git a/core/src/main/java/org/keycloak/util/JsonSerialization.java b/core/src/main/java/org/keycloak/util/JsonSerialization.java index 72945e48b66c..faa0d71c7a17 100755 --- a/core/src/main/java/org/keycloak/util/JsonSerialization.java +++ b/core/src/main/java/org/keycloak/util/JsonSerialization.java @@ -70,6 +70,10 @@ public static byte[] writeValueAsBytes(Object obj) throws IOException { return mapper.writeValueAsBytes(obj); } + public static JsonNode writeValueAsNode(Object obj) { + return mapper.valueToTree(obj); + } + public static T readValue(byte[] bytes, Class type) throws IOException { return mapper.readValue(bytes, type); } diff --git a/core/src/main/java/org/keycloak/util/TokenUtil.java b/core/src/main/java/org/keycloak/util/TokenUtil.java index 6b699afd5864..9103c38d3dd3 100644 --- a/core/src/main/java/org/keycloak/util/TokenUtil.java +++ b/core/src/main/java/org/keycloak/util/TokenUtil.java @@ -42,6 +42,9 @@ public class TokenUtil { public static final String TOKEN_TYPE_DPOP = "DPoP"; + // Mentioned in the token-exchange specification https://datatracker.ietf.org/doc/html/rfc8693#name-successful-response + public static final String TOKEN_TYPE_NA = "N_A"; + // JWT Access Token types from https://datatracker.ietf.org/doc/html/rfc9068#section-2.1 public static final String TOKEN_TYPE_JWT_ACCESS_TOKEN = "at+jwt"; public static final String TOKEN_TYPE_JWT_ACCESS_TOKEN_PREFIXED = "application/" + TOKEN_TYPE_JWT_ACCESS_TOKEN; diff --git a/core/src/test/java/org/keycloak/jose/jwk/JWKTest.java b/core/src/test/java/org/keycloak/jose/jwk/JWKTest.java index d93d88e3b276..8bfff750c631 100644 --- a/core/src/test/java/org/keycloak/jose/jwk/JWKTest.java +++ b/core/src/test/java/org/keycloak/jose/jwk/JWKTest.java @@ -17,18 +17,14 @@ package org.keycloak.jose.jwk; -import java.util.Arrays; -import java.util.List; - import org.junit.ClassRule; import org.junit.Test; +import org.keycloak.common.crypto.CryptoIntegration; import org.keycloak.common.util.Base64Url; import org.keycloak.common.util.KeyUtils; import org.keycloak.common.util.PemUtils; import org.keycloak.crypto.JavaAlgorithm; import org.keycloak.crypto.KeyType; -import org.keycloak.crypto.KeyUse; -import org.keycloak.common.crypto.CryptoIntegration; import org.keycloak.rule.CryptoInitRule; import org.keycloak.util.JsonSerialization; @@ -42,12 +38,13 @@ import java.security.cert.X509Certificate; import java.security.interfaces.ECPublicKey; import java.security.spec.ECGenParameterSpec; +import java.util.Arrays; +import java.util.List; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import static org.keycloak.common.util.CertificateUtils.generateV1SelfSignedCertificate; import static 
org.keycloak.common.util.CertificateUtils.generateV3Certificate; @@ -229,15 +226,15 @@ public void publicEs256P384() throws Exception { } @Test - public void parse() { + public void parseRsa() { String jwkJson = "{" + - " \"kty\": \"RSA\"," + - " \"alg\": \"RS256\"," + - " \"use\": \"sig\"," + - " \"kid\": \"3121adaa80ace09f89d80899d4a5dc4ce33d0747\"," + - " \"n\": \"soFDjoZ5mQ8XAA7reQAFg90inKAHk0DXMTizo4JuOsgzUbhcplIeZ7ks83hsEjm8mP8lUVaHMPMAHEIp3gu6Xxsg-s73ofx1dtt_Fo7aj8j383MFQGl8-FvixTVobNeGeC0XBBQjN8lEl-lIwOa4ZoERNAShplTej0ntDp7TQm0=\"," + - " \"e\": \"AQAB\"" + - " }"; + " \"kty\": \"RSA\"," + + " \"alg\": \"RS256\"," + + " \"use\": \"sig\"," + + " \"kid\": \"3121adaa80ace09f89d80899d4a5dc4ce33d0747\"," + + " \"n\": \"soFDjoZ5mQ8XAA7reQAFg90inKAHk0DXMTizo4JuOsgzUbhcplIeZ7ks83hsEjm8mP8lUVaHMPMAHEIp3gu6Xxsg-s73ofx1dtt_Fo7aj8j383MFQGl8-FvixTVobNeGeC0XBBQjN8lEl-lIwOa4ZoERNAShplTej0ntDp7TQm0=\"," + + " \"e\": \"AQAB\"" + + " }"; PublicKey key = JWKParser.create().parse(jwkJson).toPublicKey(); assertEquals("RSA", key.getAlgorithm()); @@ -245,20 +242,41 @@ public void parse() { } @Test - public void emptyEcOverclaim() throws Exception { - JWKBuilder builder = JWKBuilder.create(); - KeyPairGenerator generator = KeyPairGenerator.getInstance("EC"); - KeyPair keyPair = generator.generateKeyPair(); - JWK jwk = builder.ec(keyPair.getPublic(), KeyUse.ENC); - JWKParser parser = new JWKParser(jwk); - - try { - parser.toPublicKey(); - } catch (NullPointerException e) { - fail("NullPointerException is thrown: " + e.getMessage()); - } catch (RuntimeException e) { - // Other runtime exception is expected. - } + public void parseEc() { + + String jwkJson = "{\n" + + " \"kty\": \"EC\",\n" + + " \"use\": \"sig\",\n" + + " \"crv\": \"P-384\",\n" + + " \"kid\": \"KTGEM0qFeO9VGjTLjmXiE_R_eSBUkU87xmytygI1pFQ\",\n" + + " \"x\": \"_pYSppQj0JkrXFQdJPOTiktUxy_giDnqc-PEmNShrWrZm8Ol6E5qB3m1kmZJ7HUF\",\n" + + " \"y\": \"BVlstiJytsgOxrsC1VuNYdx86KKMeJg5WvJhEi-5kMpF2aMHZqbJCcIq0uRdzi7Q\",\n" + + " \"alg\": \"ES256\"\n" + + "}"; + + JWKParser sut = JWKParser.create().parse(jwkJson); + + PublicKey pub = sut.toPublicKey(); + assertNotNull(pub); + assertTrue( pub.getAlgorithm().startsWith("EC")); + assertEquals("X.509", pub.getFormat()); + } + + @Test + public void toPublicKey_EC() { + + ECPublicJWK ecJwk = new ECPublicJWK(); + ecJwk.setKeyType(KeyType.EC); + ecJwk.setCrv("P-256"); + ecJwk.setX("zHXlTZt3yU_oNnLIjgpt-ZaiStrYIzR2oxxq53J0uIs"); + ecJwk.setY("cOsAvnh6olE8KHWPHmB-pJawRWmTtbChmWtSeWZRJdc"); + + JWKParser sut = JWKParser.create(ecJwk); + + PublicKey pub = sut.toPublicKey(); + assertNotNull(pub); + assertTrue(pub.getAlgorithm().startsWith("EC")); + assertEquals("X.509", pub.getFormat()); } private byte[] sign(byte[] data, String javaAlgorithm, PrivateKey key) throws Exception { diff --git a/core/src/test/java/org/keycloak/util/JWKSUtilsTest.java b/core/src/test/java/org/keycloak/util/JWKSUtilsTest.java index 983a391accda..b769a87e0f88 100644 --- a/core/src/test/java/org/keycloak/util/JWKSUtilsTest.java +++ b/core/src/test/java/org/keycloak/util/JWKSUtilsTest.java @@ -22,6 +22,7 @@ import org.keycloak.crypto.KeyUse; import org.keycloak.crypto.KeyWrapper; import org.keycloak.crypto.PublicKeysWrapper; +import org.keycloak.jose.jwk.ECPublicJWK; import org.keycloak.jose.jwk.JSONWebKeySet; import org.keycloak.jose.jwk.JWK; import org.keycloak.rule.CryptoInitRule; @@ -29,6 +30,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import 
static org.junit.Assert.assertThrows; public abstract class JWKSUtilsTest { @@ -36,6 +38,35 @@ public abstract class JWKSUtilsTest { @ClassRule public static CryptoInitRule cryptoInitRule = new CryptoInitRule(); + @Test + public void publicEcMatches() throws Exception { + String keyA = "{" + + " \"kty\": \"EC\"," + + " \"use\": \"sig\"," + + " \"crv\": \"P-384\"," + + " \"kid\": \"key-a\"," + + " \"x\": \"KVZ5h_W0-8fXmUrxmyRpO_9vwwI7urXfyxGdxm1hpEuhPj2hhDxivnb2BhNvtC6O\"," + + " \"y\": \"1J3JVw_zR3uB3biAE7fs3V_4tJy2M1JinzWj9a4je5GSoW6zgGV4bk85OcuyUAhj\"," + + " \"alg\": \"ES384\"" + + " }"; + + ECPublicJWK ecPublicKey = JsonSerialization.readValue(keyA, ECPublicJWK.class); + JWK publicKey = JsonSerialization.readValue(keyA, JWK.class); + + assertEquals(JWKSUtils.computeThumbprint(publicKey), JWKSUtils.computeThumbprint(ecPublicKey)); + } + + @Test + public void unsupportedKeyType() throws Exception { + String keyA = "{" + + " \"kty\": \"OCT\"," + + " \"use\": \"sig\"" + + " }"; + + JWK publicKey = JsonSerialization.readValue(keyA, JWK.class); + assertThrows(UnsupportedOperationException.class, () -> JWKSUtils.computeThumbprint(publicKey)); + } + @Test public void publicRs256() throws Exception { diff --git a/core/src/test/java/org/keycloak/util/PemUtilsTest.java b/core/src/test/java/org/keycloak/util/PemUtilsTest.java index 5dc4066c44c6..83449a0589a8 100644 --- a/core/src/test/java/org/keycloak/util/PemUtilsTest.java +++ b/core/src/test/java/org/keycloak/util/PemUtilsTest.java @@ -12,6 +12,7 @@ import org.junit.Test; import org.keycloak.common.util.CertificateUtils; import org.keycloak.common.util.KeyUtils; +import org.keycloak.common.util.PemException; import org.keycloak.common.util.PemUtils; import org.keycloak.rule.CryptoInitRule; @@ -118,6 +119,50 @@ public void testPrivateKeyInPKCS8Format() { String pk = PemUtils.removeBeginEnd(privateKeyPkcs8).replace("\n", ""); PrivateKey decodedPrivateKey2 = PemUtils.decodePrivateKey(pk); Assert.assertEquals(decodedPrivateKey1, decodedPrivateKey2); + + String ecPrivateKeyPkcs8 = "-----BEGIN PRIVATE KEY-----\n" + + "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgO1oavi4kqVFc/rxj\n" + + "24SJivHXq7buWX58U0tswYikPwyhRANCAASCIp6nVvOk9flbUrMW7JPDmyaXCnDc\n" + + "Q2uMfvxVWIJzBuhG6VDoeFPk3yf2EN5t7Q8FU5jPSp6gJz9xbaFYYLL6\n" + + "-----END PRIVATE KEY-----"; + + PrivateKey decodedEcPrivateKey = PemUtils.decodePrivateKey(ecPrivateKeyPkcs8); + Assert.assertEquals("EC", decodedEcPrivateKey.getAlgorithm()); + } + + @Test + public void testDecodeCertificateBundle() { + String certBundleEC = "-----BEGIN CERTIFICATE-----\n" + + "MIIBUTCB96ADAgECAggYMJVpV/BvyTAKBggqhkjOPQQDAjARMQ8wDQYDVQQDEwZz\n" + + "dWItY2EwIBcNMDAwMTAxMDkwMDAwWhgPMjEwMDAxMDEwOTAwMDBaMBUxEzARBgNV\n" + + "BAMTCmVuZC1lbnRpdHkwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASCIp6nVvOk\n" + + "9flbUrMW7JPDmyaXCnDcQ2uMfvxVWIJzBuhG6VDoeFPk3yf2EN5t7Q8FU5jPSp6g\n" + + "Jz9xbaFYYLL6ozMwMTAOBgNVHQ8BAf8EBAMCBaAwHwYDVR0jBBgwFoAU3etTPCDC\n" + + "f31HxBuYWWjF9ImW4ccwCgYIKoZIzj0EAwIDSQAwRgIhAKpP+HBEvUWEfjdr2qD2\n" + + "sw/bVLtW1HnpqVnQm2i/kDp2AiEA6F+kKyMNu+jGKmzj0Pf6v0cj0c+f00bqoJdk\n" + + "h+GXGnM=\n" + + "-----END CERTIFICATE-----\n" + + "-----BEGIN CERTIFICATE-----\n" + + "MIIBejCCAR+gAwIBAgIIGDCVaVflNG8wCgYIKoZIzj0EAwIwDTELMAkGA1UEAxMC\n" + + "Y2EwIBcNMDAwMTAxMDkwMDAwWhgPMjEwMDAxMDEwOTAwMDBaMBExDzANBgNVBAMT\n" + + "BnN1Yi1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABI4bNe/0VXXojhjdh76p\n" + + "89esSheOT5WEBVQnJUvDBDSRoxRiFx2BEdPaVn8L4cCbaZIxLsoJusOJadm7Eltc\n" + + 
"h3qjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW\n" + + "BBTd61M8IMJ/fUfEG5hZaMX0iZbhxzAfBgNVHSMEGDAWgBQ9q0KnjYuFWTSXf4YM\n" + + "Taz6vbNVRTAKBggqhkjOPQQDAgNJADBGAiEA3y9pa2JMhtM898f6NOZhezoHzj1a\n" + + "2JQIZRLQbOTjk0wCIQCg9A8414teP9whzRGSxM4eJNExdfHeJBYjDD345EW0vg==\n" + + "-----END CERTIFICATE-----"; + + X509Certificate[] certs = PemUtils.decodeCertificates(certBundleEC); + Assert.assertEquals(2, certs.length); + Assert.assertEquals("CN=end-entity", certs[0].getSubjectX500Principal().getName()); + Assert.assertEquals("CN=sub-ca", certs[1].getSubjectX500Principal().getName()); + + String invalidCertBundle = "foo\n"; + Assert.assertThrows(PemException.class, () -> { + PemUtils.decodeCertificates(invalidCertBundle); + }); + } private void testPrivateKeyEncodeDecode(String origPrivateKeyPem) { diff --git a/crypto/default/src/main/java/org/keycloak/crypto/hash/Argon2PasswordHashProviderFactory.java b/crypto/default/src/main/java/org/keycloak/crypto/hash/Argon2PasswordHashProviderFactory.java index c625a510bf2d..53a28907e713 100644 --- a/crypto/default/src/main/java/org/keycloak/crypto/hash/Argon2PasswordHashProviderFactory.java +++ b/crypto/default/src/main/java/org/keycloak/crypto/hash/Argon2PasswordHashProviderFactory.java @@ -30,7 +30,7 @@ public class Argon2PasswordHashProviderFactory implements PasswordHashProviderFa * When we run more, this only leads to an increased memory usage and to throttling of the process in containerized environments * when a CPU limit is imposed. The throttling would have a negative impact on other concurrent non-hashing activities of Keycloak. */ - private Semaphore cpuCoreSempahore; + private Semaphore cpuCoreSemaphore; private String version; private String type; @@ -41,18 +41,18 @@ public class Argon2PasswordHashProviderFactory implements PasswordHashProviderFa @Override public PasswordHashProvider create(KeycloakSession session) { - return new Argon2PasswordHashProvider(version, type, hashLength, memory, iterations, parallelism, cpuCoreSempahore); + return new Argon2PasswordHashProvider(version, type, hashLength, memory, iterations, parallelism, cpuCoreSemaphore); } @Override public void init(Config.Scope config) { version = config.get(VERSION_KEY, Argon2Parameters.DEFAULT_VERSION); - type = config.get(VERSION_KEY, Argon2Parameters.DEFAULT_TYPE); + type = config.get(TYPE_KEY, Argon2Parameters.DEFAULT_TYPE); hashLength = config.getInt(HASH_LENGTH_KEY, Argon2Parameters.DEFAULT_HASH_LENGTH); memory = config.getInt(MEMORY_KEY, Argon2Parameters.DEFAULT_MEMORY); iterations = config.getInt(ITERATIONS_KEY, Argon2Parameters.DEFAULT_ITERATIONS); parallelism = config.getInt(PARALLELISM_KEY, Argon2Parameters.DEFAULT_PARALLELISM); - cpuCoreSempahore = new Semaphore(config.getInt(CPU_CORES_KEY, Runtime.getRuntime().availableProcessors())); + cpuCoreSemaphore = new Semaphore(config.getInt(CPU_CORES_KEY, Runtime.getRuntime().availableProcessors())); } @Override @@ -89,7 +89,7 @@ public List getConfigMetadata() { .add(); builder.property() - .name(TYPE_KEY) + .name(HASH_LENGTH_KEY) .type("int") .helpText("Hash length") .defaultValue(Argon2Parameters.DEFAULT_HASH_LENGTH) diff --git a/distribution/api-docs-dist/pom.xml b/distribution/api-docs-dist/pom.xml index 49cef2855b5b..fe2100febe96 100755 --- a/distribution/api-docs-dist/pom.xml +++ b/distribution/api-docs-dist/pom.xml @@ -76,6 +76,10 @@ com.fasterxml.jackson.datatype jackson-datatype-jsr310 + + org.infinispan + infinispan-component-annotations + diff --git 
a/distribution/downloads/src/main/resources/files b/distribution/downloads/src/main/resources/files index 72c0a1ef161c..219a5b3a84a1 100644 --- a/distribution/downloads/src/main/resources/files +++ b/distribution/downloads/src/main/resources/files @@ -4,7 +4,6 @@ mvn:keycloak-api-docs-dist:keycloak-api-docs mvn:documentation/keycloak-documentation:keycloak-documentation npm:js/libs/keycloak-admin-client/target/keycloak-keycloak-admin-client-$$VERSION$$.tgz:keycloak-admin-client-$$VERSION$$.tgz -npm:js/libs/keycloak-js/target/keycloak-js-$$VERSION$$.tgz:keycloak-js-$$VERSION$$.tgz npm:js/libs/ui-shared/target/keycloak-keycloak-ui-shared-$$VERSION$$.tgz:keycloak-ui-shared-$$VERSION$$.tgz npm:js/apps/account-ui/target/keycloak-keycloak-account-ui-$$VERSION$$.tgz:keycloak-account-ui-$$VERSION$$.tgz npm:js/apps/admin-ui/target/keycloak-keycloak-admin-ui-$$VERSION$$.tgz:keycloak-admin-ui-$$VERSION$$.tgz diff --git a/distribution/maven-plugins/osv-scanner.toml b/distribution/maven-plugins/osv-scanner.toml new file mode 100644 index 000000000000..683c17048988 --- /dev/null +++ b/distribution/maven-plugins/osv-scanner.toml @@ -0,0 +1,10 @@ +# Ignore false positives for https://securityscorecards.dev/viewer/?uri=github.com/keycloak/keycloak + +# Suppress TestNG alert: +# - TestNG is brought in as a transitive dependency via groovy-testng. +# - Test dependencies are not included in the server distribution. +# - The latest groovy-testng version doesn't address the CVE. + +[[IgnoredVulns]] +id = "GHSA-rc2q-x9mf-w3vf" +reason = "suppressed because TestNG, a transitive dependency from groovy-testng, isn’t included in the server distribution." diff --git a/distribution/pom.xml b/distribution/pom.xml index c9feefa493a3..d2b8affb0c08 100755 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -41,6 +41,7 @@ + saml-adapters galleon-feature-packs licenses-common maven-plugins diff --git a/distribution/saml-adapters/pom.xml b/distribution/saml-adapters/pom.xml new file mode 100755 index 000000000000..e0bf923961d4 --- /dev/null +++ b/distribution/saml-adapters/pom.xml @@ -0,0 +1,37 @@ + + + + + keycloak-distribution-parent + org.keycloak + 999.0.0-SNAPSHOT + + + SAML Adapters Distribution Parent + + 4.0.0 + + keycloak-saml-adapters-distribution-parent + pom + + + wildfly-adapter + + + diff --git a/distribution/saml-adapters/shared-cli/adapter-elytron-install-saml-offline.cli b/distribution/saml-adapters/shared-cli/adapter-elytron-install-saml-offline.cli new file mode 100755 index 000000000000..75e9390b7072 --- /dev/null +++ b/distribution/saml-adapters/shared-cli/adapter-elytron-install-saml-offline.cli @@ -0,0 +1,61 @@ +embed-server --server-config=${server.config:standalone.xml} + +if (outcome != success) of /extension=org.keycloak.keycloak-saml-adapter-subsystem:read-resource + /extension=org.keycloak.keycloak-saml-adapter-subsystem/:add(module=org.keycloak.keycloak-saml-adapter-subsystem) +else + echo Keycloak SAML Extension already installed +end-if + +if (outcome != success) of /subsystem=keycloak-saml:read-resource + /subsystem=keycloak-saml:add +else + echo Keycloak SAML Subsystem already installed +end-if + +if (outcome != success) of /subsystem=elytron/custom-realm=KeycloakSAMLRealm:read-resource + /subsystem=elytron/custom-realm=KeycloakSAMLRealm:add(class-name=org.keycloak.adapters.saml.elytron.KeycloakSecurityRealm, module=org.keycloak.keycloak-saml-wildfly-elytron-adapter) +else + echo Keycloak SAML Realm already installed +end-if + +if (outcome != success) of 
/subsystem=elytron/security-domain=KeycloakDomain:read-resource + /subsystem=elytron/security-domain=KeycloakDomain:add(default-realm=KeycloakSAMLRealm,permission-mapper=default-permission-mapper,security-event-listener=local-audit,realms=[{realm=KeycloakSAMLRealm}]) +else + echo Keycloak Security Domain already installed. Trying to install Keycloak SAML Realm. + /subsystem=elytron/security-domain=KeycloakDomain:list-add(name=realms, value={realm=KeycloakSAMLRealm}) +end-if + +if (outcome != success) of /subsystem=elytron/constant-realm-mapper=keycloak-saml-realm-mapper:read-resource + /subsystem=elytron/constant-realm-mapper=keycloak-saml-realm-mapper:add(realm-name=KeycloakSAMLRealm) +else + echo Keycloak SAML Realm Mapper already installed +end-if + +if (outcome != success) of /subsystem=elytron/service-loader-http-server-mechanism-factory=keycloak-saml-http-server-mechanism-factory:read-resource + /subsystem=elytron/service-loader-http-server-mechanism-factory=keycloak-saml-http-server-mechanism-factory:add(module=org.keycloak.keycloak-saml-wildfly-elytron-adapter) +else + echo Keycloak SAML HTTP Mechanism Factory already installed +end-if + +if (outcome != success) of /subsystem=elytron/aggregate-http-server-mechanism-factory=keycloak-http-server-mechanism-factory:read-resource + /subsystem=elytron/aggregate-http-server-mechanism-factory=keycloak-http-server-mechanism-factory:add(http-server-mechanism-factories=[keycloak-saml-http-server-mechanism-factory, global]) +else + echo Keycloak HTTP Mechanism Factory already installed. Trying to install Keycloak SAML HTTP Mechanism Factory. + /subsystem=elytron/aggregate-http-server-mechanism-factory=keycloak-http-server-mechanism-factory:list-add(name=http-server-mechanism-factories, value=keycloak-saml-http-server-mechanism-factory) +end-if + +if (outcome != success) of /subsystem=elytron/http-authentication-factory=keycloak-http-authentication:read-resource + /subsystem=elytron/http-authentication-factory=keycloak-http-authentication:add(security-domain=KeycloakDomain,http-server-mechanism-factory=keycloak-http-server-mechanism-factory,mechanism-configurations=[{mechanism-name=KEYCLOAK-SAML,mechanism-realm-configurations=[{realm-name=KeycloakSAMLCRealm,realm-mapper=keycloak-saml-realm-mapper}]}]) +else + echo Keycloak HTTP Authentication Factory already installed. 
Trying to install Keycloak SAML Mechanism Configuration + /subsystem=elytron/http-authentication-factory=keycloak-http-authentication:list-add(name=mechanism-configurations, value={mechanism-name=KEYCLOAK-SAML,mechanism-realm-configurations=[{realm-name=KeycloakSAMLRealm,realm-mapper=keycloak-saml-realm-mapper}]}) +end-if + +if (outcome != success) of /subsystem=undertow/application-security-domain=other:read-resource + /subsystem=undertow/application-security-domain=other:add(http-authentication-factory=keycloak-http-authentication) +else + batch + /subsystem=undertow/application-security-domain=other:undefine-attribute(name=security-domain) + /subsystem=undertow/application-security-domain=other:write-attribute(name=http-authentication-factory,value=keycloak-http-authentication) + run-batch +end-if diff --git a/distribution/saml-adapters/shared-cli/adapter-elytron-install-saml.cli b/distribution/saml-adapters/shared-cli/adapter-elytron-install-saml.cli new file mode 100755 index 000000000000..dbd1efb73379 --- /dev/null +++ b/distribution/saml-adapters/shared-cli/adapter-elytron-install-saml.cli @@ -0,0 +1,59 @@ +if (outcome != success) of /extension=org.keycloak.keycloak-saml-adapter-subsystem:read-resource + /extension=org.keycloak.keycloak-saml-adapter-subsystem/:add(module=org.keycloak.keycloak-saml-adapter-subsystem) +else + echo Keycloak SAML Extension already installed +end-if + +if (outcome != success) of /subsystem=keycloak-saml:read-resource + /subsystem=keycloak-saml:add +else + echo Keycloak SAML Subsystem already installed +end-if + +if (outcome != success) of /subsystem=elytron/custom-realm=KeycloakSAMLRealm:read-resource + /subsystem=elytron/custom-realm=KeycloakSAMLRealm:add(class-name=org.keycloak.adapters.saml.elytron.KeycloakSecurityRealm, module=org.keycloak.keycloak-saml-wildfly-elytron-adapter) +else + echo Keycloak SAML Realm already installed +end-if + +if (outcome != success) of /subsystem=elytron/security-domain=KeycloakDomain:read-resource + /subsystem=elytron/security-domain=KeycloakDomain:add(default-realm=KeycloakSAMLRealm,permission-mapper=default-permission-mapper,security-event-listener=local-audit,realms=[{realm=KeycloakSAMLRealm}]) +else + echo Keycloak Security Domain already installed. Trying to install Keycloak SAML Realm. + /subsystem=elytron/security-domain=KeycloakDomain:list-add(name=realms, value={realm=KeycloakSAMLRealm}) +end-if + +if (outcome != success) of /subsystem=elytron/constant-realm-mapper=keycloak-saml-realm-mapper:read-resource + /subsystem=elytron/constant-realm-mapper=keycloak-saml-realm-mapper:add(realm-name=KeycloakSAMLRealm) +else + echo Keycloak SAML Realm Mapper already installed +end-if + +if (outcome != success) of /subsystem=elytron/service-loader-http-server-mechanism-factory=keycloak-saml-http-server-mechanism-factory:read-resource + /subsystem=elytron/service-loader-http-server-mechanism-factory=keycloak-saml-http-server-mechanism-factory:add(module=org.keycloak.keycloak-saml-wildfly-elytron-adapter) +else + echo Keycloak SAML HTTP Mechanism Factory already installed +end-if + +if (outcome != success) of /subsystem=elytron/aggregate-http-server-mechanism-factory=keycloak-http-server-mechanism-factory:read-resource + /subsystem=elytron/aggregate-http-server-mechanism-factory=keycloak-http-server-mechanism-factory:add(http-server-mechanism-factories=[keycloak-saml-http-server-mechanism-factory, global]) +else + echo Keycloak HTTP Mechanism Factory already installed. Trying to install Keycloak SAML HTTP Mechanism Factory. 
+ /subsystem=elytron/aggregate-http-server-mechanism-factory=keycloak-http-server-mechanism-factory:list-add(name=http-server-mechanism-factories, value=keycloak-saml-http-server-mechanism-factory) +end-if + +if (outcome != success) of /subsystem=elytron/http-authentication-factory=keycloak-http-authentication:read-resource + /subsystem=elytron/http-authentication-factory=keycloak-http-authentication:add(security-domain=KeycloakDomain,http-server-mechanism-factory=keycloak-http-server-mechanism-factory,mechanism-configurations=[{mechanism-name=KEYCLOAK-SAML,mechanism-realm-configurations=[{realm-name=KeycloakSAMLCRealm,realm-mapper=keycloak-saml-realm-mapper}]}]) +else + echo Keycloak HTTP Authentication Factory already installed. Trying to install Keycloak SAML Mechanism Configuration + /subsystem=elytron/http-authentication-factory=keycloak-http-authentication:list-add(name=mechanism-configurations, value={mechanism-name=KEYCLOAK-SAML,mechanism-realm-configurations=[{realm-name=KeycloakSAMLRealm,realm-mapper=keycloak-saml-realm-mapper}]}) +end-if + +if (outcome != success) of /subsystem=undertow/application-security-domain=other:read-resource + /subsystem=undertow/application-security-domain=other:add(http-authentication-factory=keycloak-http-authentication) +else + batch + /subsystem=undertow/application-security-domain=other:undefine-attribute(name=security-domain) + /subsystem=undertow/application-security-domain=other:write-attribute(name=http-authentication-factory,value=keycloak-http-authentication) + run-batch +end-if diff --git a/distribution/saml-adapters/shared-cli/adapter-install-saml-offline.cli b/distribution/saml-adapters/shared-cli/adapter-install-saml-offline.cli new file mode 100755 index 000000000000..8c3ca4ca5889 --- /dev/null +++ b/distribution/saml-adapters/shared-cli/adapter-install-saml-offline.cli @@ -0,0 +1,3 @@ +embed-server --server-config=${server.config:standalone.xml} +/extension=org.keycloak.keycloak-saml-adapter-subsystem/:add(module=org.keycloak.keycloak-saml-adapter-subsystem) +/subsystem=keycloak-saml:add \ No newline at end of file diff --git a/distribution/saml-adapters/shared-cli/adapter-install-saml.cli b/distribution/saml-adapters/shared-cli/adapter-install-saml.cli new file mode 100755 index 000000000000..d1bdfa3ca4df --- /dev/null +++ b/distribution/saml-adapters/shared-cli/adapter-install-saml.cli @@ -0,0 +1,2 @@ +/extension=org.keycloak.keycloak-saml-adapter-subsystem/:add(module=org.keycloak.keycloak-saml-adapter-subsystem) +/subsystem=keycloak-saml:add \ No newline at end of file diff --git a/distribution/saml-adapters/wildfly-adapter/pom.xml b/distribution/saml-adapters/wildfly-adapter/pom.xml new file mode 100755 index 000000000000..b019240ae075 --- /dev/null +++ b/distribution/saml-adapters/wildfly-adapter/pom.xml @@ -0,0 +1,38 @@ + + + + + keycloak-parent + org.keycloak + 999.0.0-SNAPSHOT + ../../../pom.xml + + Keycloak Wildfly SAML Adapter + + 4.0.0 + + keycloak-saml-wildfly-adapter-dist-pom + pom + + + wildfly-modules + wildfly-adapter-zip + + + diff --git a/distribution/saml-adapters/wildfly-adapter/wildfly-adapter-zip/assembly.xml b/distribution/saml-adapters/wildfly-adapter/wildfly-adapter-zip/assembly.xml new file mode 100755 index 000000000000..feb22b8d3894 --- /dev/null +++ b/distribution/saml-adapters/wildfly-adapter/wildfly-adapter-zip/assembly.xml @@ -0,0 +1,58 @@ + + + + war-dist + + + zip + tar.gz + + false + + + + ${project.build.directory}/unpacked/modules + + **/** + + modules + + + 
${project.build.directory}/unpacked/licenses + docs/licenses-keycloak + + + + + ../../shared-cli/adapter-install-saml.cli + bin + + + ../../shared-cli/adapter-install-saml-offline.cli + bin + + + ../../shared-cli/adapter-elytron-install-saml.cli + bin + + + ../../shared-cli/adapter-elytron-install-saml-offline.cli + bin + + + diff --git a/distribution/saml-adapters/wildfly-adapter/wildfly-adapter-zip/pom.xml b/distribution/saml-adapters/wildfly-adapter/wildfly-adapter-zip/pom.xml new file mode 100755 index 000000000000..d503885a0fb5 --- /dev/null +++ b/distribution/saml-adapters/wildfly-adapter/wildfly-adapter-zip/pom.xml @@ -0,0 +1,93 @@ + + + + 4.0.0 + + keycloak-parent + org.keycloak + 999.0.0-SNAPSHOT + ../../../../pom.xml + + + keycloak-saml-wildfly-adapter-dist + pom + Keycloak SAML Wildfly Adapter Distro + + + + + org.keycloak + keycloak-saml-wildfly-modules + zip + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + unpack + prepare-package + + unpack + + + + + org.keycloak + keycloak-saml-wildfly-modules + zip + ${project.build.directory}/unpacked + + + + + + + + + maven-assembly-plugin + + + assemble + package + + single + + + + assembly.xml + + + target + + + target/assembly/work + + false + + + + + + + diff --git a/distribution/saml-adapters/wildfly-adapter/wildfly-modules/assembly.xml b/distribution/saml-adapters/wildfly-adapter/wildfly-modules/assembly.xml new file mode 100755 index 000000000000..9d38002cfe3d --- /dev/null +++ b/distribution/saml-adapters/wildfly-adapter/wildfly-modules/assembly.xml @@ -0,0 +1,43 @@ + + + + dist + + + zip + + false + + + + src/main/resources/licenses/keycloak + licenses + + licenses.xml + + + + ${project.build.directory}/licenses + licenses + + + ${project.build.directory}/modules + modules + + + diff --git a/distribution/saml-adapters/wildfly-adapter/wildfly-modules/build.xml b/distribution/saml-adapters/wildfly-adapter/wildfly-modules/build.xml new file mode 100755 index 000000000000..48329548cbf4 --- /dev/null +++ b/distribution/saml-adapters/wildfly-adapter/wildfly-modules/build.xml @@ -0,0 +1,94 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/distribution/saml-adapters/wildfly-adapter/wildfly-modules/lib.xml b/distribution/saml-adapters/wildfly-adapter/wildfly-modules/lib.xml new file mode 100755 index 000000000000..005acd68eee0 --- /dev/null +++ b/distribution/saml-adapters/wildfly-adapter/wildfly-modules/lib.xml @@ -0,0 +1,270 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + "; + project.setProperty("current.maven.root", root); + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + "; + if(path.indexOf('${') != -1) { + throw "Module resource root not found, make sure it is listed in build/pom.xml" + path; + } + if(attributes.get("jandex") == "true" ) { + root = root + "\n\t"; + } + project.setProperty("current.resource.root", root); + ]]> + + + diff --git a/distribution/saml-adapters/wildfly-adapter/wildfly-modules/pom.xml b/distribution/saml-adapters/wildfly-adapter/wildfly-modules/pom.xml new file mode 100755 index 000000000000..8d7e26856ae9 --- /dev/null +++ 
b/distribution/saml-adapters/wildfly-adapter/wildfly-modules/pom.xml @@ -0,0 +1,226 @@ + + + + + + 4.0.0 + + + keycloak-parent + org.keycloak + 999.0.0-SNAPSHOT + ../../../../pom.xml + + + keycloak-saml-wildfly-modules + + Keycloak SAML Wildfly Modules + pom + + + org.keycloak + keycloak-common + + + * + * + + + + + org.keycloak + keycloak-adapter-spi + + + * + * + + + + + org.keycloak + keycloak-saml-core + + + * + * + + + + + org.keycloak + keycloak-saml-adapter-api-public + + + * + * + + + + + org.keycloak + keycloak-saml-adapter-core + + + * + * + + + + + org.keycloak + keycloak-jboss-adapter-core + + + * + * + + + + + org.keycloak + keycloak-saml-core-public + + + * + * + + + + + org.keycloak + keycloak-saml-wildfly-elytron-adapter + + + * + * + + + + + org.keycloak + keycloak-saml-wildfly-subsystem + + + * + * + + + + + org.keycloak + keycloak-saml-adapter-galleon-pack + zip + + + * + * + + + + + + + + + org.apache.maven.plugins + maven-antrun-plugin + false + + + build-dist + + run + + compile + + + + + + + + + + + + org.jboss + jandex + 1.0.3.Final + + + ant-contrib + ant-contrib + 1.0b3 + + + ant + ant + + + + + org.apache.ant + ant-apache-bsf + 1.9.3 + + + org.apache.bsf + bsf-api + 3.1 + + + rhino + js + 1.7R2 + + + + + maven-assembly-plugin + + + assemble + package + + single + + + + assembly.xml + + + target + + + target/assembly/work + + false + + + + + + org.keycloak + keycloak-distribution-licenses-maven-plugin + + + + diff --git a/distribution/saml-adapters/wildfly-adapter/wildfly-modules/src/main/resources/licenses/keycloak/licenses.xml b/distribution/saml-adapters/wildfly-adapter/wildfly-modules/src/main/resources/licenses/keycloak/licenses.xml new file mode 100644 index 000000000000..bac7cdfbcc59 --- /dev/null +++ b/distribution/saml-adapters/wildfly-adapter/wildfly-modules/src/main/resources/licenses/keycloak/licenses.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/docs/building.md b/docs/building.md index 11184fae47ac..b40809c2d3b4 100644 --- a/docs/building.md +++ b/docs/building.md @@ -1,10 +1,16 @@ ## Building from source -Ensure you have JDK 21 (or newer) and Git installed +Ensure you have **JDK 17** or **JDK 21** and Git installed java -version git --version +Newer versions of the JDK are not supported. If you have multiple JDK versions +installed, you can specify which one to use during the build by setting the `JAVA_HOME` +environment variable (this should be the directory containing `/bin/` or `/jre/`). + + JAVA_HOME=/path/to/jdk-21/ ./mvnw clean install + Instead of using a locally installed Maven, call the Maven wrapper script `mvnw` in the main folder of the project. This will use the Maven version which is supported by this project. 
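For illustration only, here is one way to select a specific JDK for a single build without changing the system default; the install path is a placeholder and `-DskipTests` is a standard Maven flag used here just to keep the example short:

    export JAVA_HOME=/usr/lib/jvm/jdk-21    # placeholder path to a JDK 17 or 21 installation
    "$JAVA_HOME/bin/java" -version          # confirm which JDK the wrapper will use
    ./mvnw clean install -DskipTests        # build without running the test suite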
diff --git a/docs/documentation/authorization_services/images/getting-started/hello-world/authz-settings.png b/docs/documentation/authorization_services/images/getting-started/hello-world/authz-settings.png index 5214a2fbaa6a..e038eb654f88 100644 Binary files a/docs/documentation/authorization_services/images/getting-started/hello-world/authz-settings.png and b/docs/documentation/authorization_services/images/getting-started/hello-world/authz-settings.png differ diff --git a/docs/documentation/authorization_services/images/getting-started/hello-world/create-client.png b/docs/documentation/authorization_services/images/getting-started/hello-world/create-client.png index e59680027c21..df6b80a70fdd 100644 Binary files a/docs/documentation/authorization_services/images/getting-started/hello-world/create-client.png and b/docs/documentation/authorization_services/images/getting-started/hello-world/create-client.png differ diff --git a/docs/documentation/authorization_services/images/getting-started/hello-world/create-scope.png b/docs/documentation/authorization_services/images/getting-started/hello-world/create-scope.png index 55147e663fcb..4897878f94d9 100644 Binary files a/docs/documentation/authorization_services/images/getting-started/hello-world/create-scope.png and b/docs/documentation/authorization_services/images/getting-started/hello-world/create-scope.png differ diff --git a/docs/documentation/authorization_services/images/getting-started/hello-world/create-user.png b/docs/documentation/authorization_services/images/getting-started/hello-world/create-user.png index 097e65703bbc..ee1e67bbdcc3 100644 Binary files a/docs/documentation/authorization_services/images/getting-started/hello-world/create-user.png and b/docs/documentation/authorization_services/images/getting-started/hello-world/create-user.png differ diff --git a/docs/documentation/authorization_services/images/getting-started/hello-world/enable-authz.png b/docs/documentation/authorization_services/images/getting-started/hello-world/enable-authz.png index d7a6e4d39820..e1cce37dfe2c 100644 Binary files a/docs/documentation/authorization_services/images/getting-started/hello-world/enable-authz.png and b/docs/documentation/authorization_services/images/getting-started/hello-world/enable-authz.png differ diff --git a/docs/documentation/authorization_services/images/getting-started/kc-start-page.png b/docs/documentation/authorization_services/images/getting-started/kc-start-page.png index e20eb9bdb95d..c4e53d2ec030 100644 Binary files a/docs/documentation/authorization_services/images/getting-started/kc-start-page.png and b/docs/documentation/authorization_services/images/getting-started/kc-start-page.png differ diff --git a/docs/documentation/authorization_services/images/permission/create-resource.png b/docs/documentation/authorization_services/images/permission/create-resource.png index e4b1c1d36c4b..487bd61b7c39 100644 Binary files a/docs/documentation/authorization_services/images/permission/create-resource.png and b/docs/documentation/authorization_services/images/permission/create-resource.png differ diff --git a/docs/documentation/authorization_services/images/permission/create-scope.png b/docs/documentation/authorization_services/images/permission/create-scope.png index 6ba0c886a36d..1db4839e0140 100644 Binary files a/docs/documentation/authorization_services/images/permission/create-scope.png and b/docs/documentation/authorization_services/images/permission/create-scope.png differ diff --git 
a/docs/documentation/authorization_services/images/permission/typed-resource-perm-example.png b/docs/documentation/authorization_services/images/permission/typed-resource-perm-example.png index 678bc4474789..1e04d4bf1590 100644 Binary files a/docs/documentation/authorization_services/images/permission/typed-resource-perm-example.png and b/docs/documentation/authorization_services/images/permission/typed-resource-perm-example.png differ diff --git a/docs/documentation/authorization_services/images/permission/view.png b/docs/documentation/authorization_services/images/permission/view.png index fa4a185c738f..5bdda9026318 100644 Binary files a/docs/documentation/authorization_services/images/permission/view.png and b/docs/documentation/authorization_services/images/permission/view.png differ diff --git a/docs/documentation/authorization_services/images/policy-evaluation-tool/policy-evaluation-tool.png b/docs/documentation/authorization_services/images/policy-evaluation-tool/policy-evaluation-tool.png index 079b0cb6b74c..b69bba96c39a 100644 Binary files a/docs/documentation/authorization_services/images/policy-evaluation-tool/policy-evaluation-tool.png and b/docs/documentation/authorization_services/images/policy-evaluation-tool/policy-evaluation-tool.png differ diff --git a/docs/documentation/authorization_services/images/policy/create-aggregated.png b/docs/documentation/authorization_services/images/policy/create-aggregated.png index b7fe7d050f79..548b5cb65c2a 100644 Binary files a/docs/documentation/authorization_services/images/policy/create-aggregated.png and b/docs/documentation/authorization_services/images/policy/create-aggregated.png differ diff --git a/docs/documentation/authorization_services/images/policy/create-client-scope.png b/docs/documentation/authorization_services/images/policy/create-client-scope.png index 2412a3e84529..2241699bedcb 100644 Binary files a/docs/documentation/authorization_services/images/policy/create-client-scope.png and b/docs/documentation/authorization_services/images/policy/create-client-scope.png differ diff --git a/docs/documentation/authorization_services/images/policy/create-client.png b/docs/documentation/authorization_services/images/policy/create-client.png index f16e8c2ca995..27430f80b1e2 100644 Binary files a/docs/documentation/authorization_services/images/policy/create-client.png and b/docs/documentation/authorization_services/images/policy/create-client.png differ diff --git a/docs/documentation/authorization_services/images/policy/create-group-extend-children.png b/docs/documentation/authorization_services/images/policy/create-group-extend-children.png index 36c1cee1101c..eedd6fee8cfa 100644 Binary files a/docs/documentation/authorization_services/images/policy/create-group-extend-children.png and b/docs/documentation/authorization_services/images/policy/create-group-extend-children.png differ diff --git a/docs/documentation/authorization_services/images/policy/create-group.png b/docs/documentation/authorization_services/images/policy/create-group.png index 09691f3837ae..461800e428e1 100644 Binary files a/docs/documentation/authorization_services/images/policy/create-group.png and b/docs/documentation/authorization_services/images/policy/create-group.png differ diff --git a/docs/documentation/authorization_services/images/policy/create-regex.png b/docs/documentation/authorization_services/images/policy/create-regex.png index e9bb82677029..569a0110d0f3 100644 Binary files 
a/docs/documentation/authorization_services/images/policy/create-regex.png and b/docs/documentation/authorization_services/images/policy/create-regex.png differ diff --git a/docs/documentation/authorization_services/images/policy/create-time.png b/docs/documentation/authorization_services/images/policy/create-time.png index 3cc3a62794e7..ee8b41338014 100644 Binary files a/docs/documentation/authorization_services/images/policy/create-time.png and b/docs/documentation/authorization_services/images/policy/create-time.png differ diff --git a/docs/documentation/authorization_services/images/policy/create-user.png b/docs/documentation/authorization_services/images/policy/create-user.png index 5ece77b8e374..755d8c35bea6 100644 Binary files a/docs/documentation/authorization_services/images/policy/create-user.png and b/docs/documentation/authorization_services/images/policy/create-user.png differ diff --git a/docs/documentation/authorization_services/images/policy/view.png b/docs/documentation/authorization_services/images/policy/view.png index 361ff3a25aa7..c39e64264596 100644 Binary files a/docs/documentation/authorization_services/images/policy/view.png and b/docs/documentation/authorization_services/images/policy/view.png differ diff --git a/docs/documentation/authorization_services/images/resource-server/authz-export.png b/docs/documentation/authorization_services/images/resource-server/authz-export.png index 2ebd4acab182..59876dfdd699 100644 Binary files a/docs/documentation/authorization_services/images/resource-server/authz-export.png and b/docs/documentation/authorization_services/images/resource-server/authz-export.png differ diff --git a/docs/documentation/authorization_services/images/resource-server/authz-settings.png b/docs/documentation/authorization_services/images/resource-server/authz-settings.png index 23a299057da4..e038eb654f88 100644 Binary files a/docs/documentation/authorization_services/images/resource-server/authz-settings.png and b/docs/documentation/authorization_services/images/resource-server/authz-settings.png differ diff --git a/docs/documentation/authorization_services/images/resource-server/client-create.png b/docs/documentation/authorization_services/images/resource-server/client-create.png index 4344ac163eda..4c7a315adf1d 100644 Binary files a/docs/documentation/authorization_services/images/resource-server/client-create.png and b/docs/documentation/authorization_services/images/resource-server/client-create.png differ diff --git a/docs/documentation/authorization_services/images/resource-server/client-enable-authz.png b/docs/documentation/authorization_services/images/resource-server/client-enable-authz.png index 7024f36f490e..1dbc9a35ac87 100644 Binary files a/docs/documentation/authorization_services/images/resource-server/client-enable-authz.png and b/docs/documentation/authorization_services/images/resource-server/client-enable-authz.png differ diff --git a/docs/documentation/authorization_services/images/resource-server/client-list.png b/docs/documentation/authorization_services/images/resource-server/client-list.png index d92fa79169d4..20613357f366 100644 Binary files a/docs/documentation/authorization_services/images/resource-server/client-list.png and b/docs/documentation/authorization_services/images/resource-server/client-list.png differ diff --git a/docs/documentation/authorization_services/images/resource-server/client-settings.png b/docs/documentation/authorization_services/images/resource-server/client-settings.png index 9268a982e383..3fd6531b573c 
100644 Binary files a/docs/documentation/authorization_services/images/resource-server/client-settings.png and b/docs/documentation/authorization_services/images/resource-server/client-settings.png differ diff --git a/docs/documentation/authorization_services/images/resource-server/default-permission.png b/docs/documentation/authorization_services/images/resource-server/default-permission.png index f9632a742291..f3424d48cdf7 100644 Binary files a/docs/documentation/authorization_services/images/resource-server/default-permission.png and b/docs/documentation/authorization_services/images/resource-server/default-permission.png differ diff --git a/docs/documentation/authorization_services/images/resource-server/default-policy.png b/docs/documentation/authorization_services/images/resource-server/default-policy.png index 40f4ed431e4a..ecec90194d71 100644 Binary files a/docs/documentation/authorization_services/images/resource-server/default-policy.png and b/docs/documentation/authorization_services/images/resource-server/default-policy.png differ diff --git a/docs/documentation/authorization_services/images/resource-server/default-resource.png b/docs/documentation/authorization_services/images/resource-server/default-resource.png index e9a6a9779ffb..59b789231166 100644 Binary files a/docs/documentation/authorization_services/images/resource-server/default-resource.png and b/docs/documentation/authorization_services/images/resource-server/default-resource.png differ diff --git a/docs/documentation/authorization_services/images/resource/create.png b/docs/documentation/authorization_services/images/resource/create.png index 7280ed78f2e2..366da990d62c 100644 Binary files a/docs/documentation/authorization_services/images/resource/create.png and b/docs/documentation/authorization_services/images/resource/create.png differ diff --git a/docs/documentation/authorization_services/images/resource/view.png b/docs/documentation/authorization_services/images/resource/view.png index eeee62d9199f..02b8a5f43ff2 100644 Binary files a/docs/documentation/authorization_services/images/resource/view.png and b/docs/documentation/authorization_services/images/resource/view.png differ diff --git a/docs/documentation/authorization_services/images/service/rs-uma-protection-role.png b/docs/documentation/authorization_services/images/service/rs-uma-protection-role.png index ef460cd1c8ab..89ef67eccc43 100644 Binary files a/docs/documentation/authorization_services/images/service/rs-uma-protection-role.png and b/docs/documentation/authorization_services/images/service/rs-uma-protection-role.png differ diff --git a/docs/documentation/authorization_services/topics/getting-started-overview.adoc b/docs/documentation/authorization_services/topics/getting-started-overview.adoc index 55e04eba6daa..53b831335af4 100644 --- a/docs/documentation/authorization_services/topics/getting-started-overview.adoc +++ b/docs/documentation/authorization_services/topics/getting-started-overview.adoc @@ -4,7 +4,7 @@ For certain applications, you can look at the following resources to quickly get started with {project_name} Authorization Services: -* {quickstartRepo_link}/tree/latest/jakarta/servlet-authz-client[Securing a JakartaEE Application in Wildfly] -* {quickstartRepo_link}/tree/latest/spring/rest-authz-resource-server[Securing a Spring Boot Application] +* {quickstartRepo_link}/tree/main/jakarta/servlet-authz-client[Securing a JakartaEE Application in Wildfly] +* {quickstartRepo_link}/tree/main/spring/rest-authz-resource-server[Securing a 
Spring Boot Application] * link:https://quarkus.io/guides/security-keycloak-authorization[Securing Quarkus Applications] -* *Keycloak Node.js adapter* in the link:{securing_apps_link}[securing apps] section +* link:https://www.keycloak.org/securing-apps/nodejs-adapter[Keycloak Node.js adapter] diff --git a/docs/documentation/authorization_services/topics/policy-evaluation-api.adoc b/docs/documentation/authorization_services/topics/policy-evaluation-api.adoc index 731907861005..cab4c47b99db 100644 --- a/docs/documentation/authorization_services/topics/policy-evaluation-api.adoc +++ b/docs/documentation/authorization_services/topics/policy-evaluation-api.adoc @@ -99,11 +99,11 @@ The `EvaluationContext` also gives you access to attributes related to both the | String. Format `MM/dd/yyyy hh:mm:ss` | kc.client.network.ip_address -| IPv4 address of the client +| IP address of the client, can be null if a valid IP is not provided. | String | kc.client.network.host -| Client's host name +| Client's host name, will be the IP address or whatever is provided by proxy headers | String | kc.client.id diff --git a/docs/documentation/authorization_services/topics/resource-view.adoc b/docs/documentation/authorization_services/topics/resource-view.adoc index 3cf8433c86cc..45eec52f9eaf 100644 --- a/docs/documentation/authorization_services/topics/resource-view.adoc +++ b/docs/documentation/authorization_services/topics/resource-view.adoc @@ -9,8 +9,8 @@ image:images/resource/view.png[alt="Resources"] The resource list provides information about the protected resources, such as: * Type -* URIS * Owner +* URIs * Associated scopes, if any * Associated permissions diff --git a/docs/documentation/release_notes/index.adoc b/docs/documentation/release_notes/index.adoc index 805b42a52018..7c5a58ebddf2 100644 --- a/docs/documentation/release_notes/index.adoc +++ b/docs/documentation/release_notes/index.adoc @@ -13,6 +13,21 @@ include::topics/templates/document-attributes.adoc[] :release_header_latest_link: {releasenotes_link_latest} include::topics/templates/release-header.adoc[] +== {project_name_full} 26.4.0 +include::topics/26_4_0.adoc[leveloffset=2] + +== {project_name_full} 26.3.0 +include::topics/26_3_0.adoc[leveloffset=2] + +== {project_name_full} 26.2.0 +include::topics/26_2_0.adoc[leveloffset=2] + +== {project_name_full} 26.1.3 +include::topics/26_1_3.adoc[leveloffset=2] + +== {project_name_full} 26.1.1 +include::topics/26_1_1.adoc[leveloffset=2] + == {project_name_full} 26.1.0 include::topics/26_1_0.adoc[leveloffset=2] diff --git a/docs/documentation/release_notes/topics/26_1_0.adoc b/docs/documentation/release_notes/topics/26_1_0.adoc index cd8172c95f1d..80b00b3043e5 100644 --- a/docs/documentation/release_notes/topics/26_1_0.adoc +++ b/docs/documentation/release_notes/topics/26_1_0.adoc @@ -15,7 +15,7 @@ See the https://www.keycloak.org/server/caching[Configuring distributed caches] = Virtual Threads enabled for Infinispan and JGroups thread pools -Starting from this release, {project_name} automatically enables the virtual thread pool support in both the embedded Infinispan and JGroups when running on OpenJDK 21. +Starting from this release, {project_name} automatically enables the virtual thread pool support in both the embedded Infinispan and JGroups when running on OpenJDK 21 for environments with at least 2 CPU cores available. 
This removes the need to configure the JGroups thread pool, the need to align the JGroups thread pool with the HTTP worker thread pool, and reduces the overall memory footprint. = OpenTelemetry Tracing supported diff --git a/docs/documentation/release_notes/topics/26_1_1.adoc b/docs/documentation/release_notes/topics/26_1_1.adoc new file mode 100644 index 000000000000..1bf7ece2bc41 --- /dev/null +++ b/docs/documentation/release_notes/topics/26_1_1.adoc @@ -0,0 +1,11 @@ += New option in X.509 authenticator to abort authentication if CRL is outdated + +The X.509 authenticator has a new option `x509-cert-auth-crl-abort-if-non-updated` (*CRL abort if non updated* in the Admin Console) to abort the login if a CRL is configured to validate the certificate and the CRL is not updated in the time specified in the next update field. The new option defaults to `true` in the Admin Console. For more details about the CRL next update field, see link:https://datatracker.ietf.org/doc/html/rfc5280#section-5.1.2.5[RFC5280, Section-5.1.2.5]. + +The value `false` is maintained for compatibility with the previous behavior. Note that existing configurations will not have the new option and will act as if this option was set to `false`, but the Admin Console will add the default value `true` on edit. + += New option in Send Reset Email to force a login after reset credentials + +The `reset-credential-email` (*Send Reset Email*) is the authenticator used in the *reset credentials* flow (*forgot password* feature) for sending the email to the user with the reset credentials token link. This authenticator now has a new option `force-login` (*Force login after reset*). When this option is set to `true`, the authenticator terminates the session and forces a new login. + +For more details about this new option, see link:{adminguide_link}#enabling-forgot-password[Enable forgot password]. \ No newline at end of file diff --git a/docs/documentation/release_notes/topics/26_1_3.adoc b/docs/documentation/release_notes/topics/26_1_3.adoc new file mode 100644 index 000000000000..c428b5bb8837 --- /dev/null +++ b/docs/documentation/release_notes/topics/26_1_3.adoc @@ -0,0 +1,5 @@ += Send Reset Email force login again for federated users after reset credentials + +In <> a new configuration option was added to the `reset-credential-email` (*Send Reset Email*) authenticator to allow changing the default behavior after the reset credentials flow. Now the option `force-login` (*Force login after reset*) is adding a third configuration value `only-federated`, which means that the force login is true for federated users and false for the internal database users. The new behavior is now the default. This way all users managed by user federation providers, whose implementation can be not so tightly integrated with {project_name}, are forced to login again after the reset credentials flow to avoid any issue. This change in behavior is due to the secure by default policy. + +For more information, see link:{adminguide_link}#enabling-forgot-password[Enable forgot password]. \ No newline at end of file diff --git a/docs/documentation/release_notes/topics/26_2_0.adoc b/docs/documentation/release_notes/topics/26_2_0.adoc new file mode 100644 index 000000000000..1dd1f133e774 --- /dev/null +++ b/docs/documentation/release_notes/topics/26_2_0.adoc @@ -0,0 +1,173 @@ += Supported Standard Token Exchange + +In this release, we added support for the Standard token exchange! 
The token exchange feature was in preview for a long time, so we are glad to finally support the standard token exchange. +For now, this is limited to exchanging the Internal token to internal token compliant with the https://datatracker.ietf.org/doc/html/rfc8693[Token exchange specification]. It does not yet cover use +cases related to identity brokering or subject impersonation. We hope to support even more token exchange use cases in subsequent releases. + +For more details, see the link:{securing_apps_token_exchange_link}#_standard-token-exchange[Standard token exchange]. + +For information on how to upgrade from the legacy token exchange used in previous {project_name} versions, see the link:{upgradingguide_link}[{upgradingguide_name}]. + += Fine-grained admin permissions supported + +This release introduces support for a new version of fine-grained admin permissions. Version 2 (V2) provides enhanced flexibility and control over administrative access within realms. +With this feature, administrators can define permissions for administering users, groups, clients, and roles without relying on broad administrative roles. V2 offers the same level of access control over realm resources as the previous version, with plans to extend its capabilities in future versions. Some key points follow: + +* *Centralized Admin Console Management* - New *Permissions* section was introduced to allow management from a single place without having to navigate to different places in the Admin Console. +* *Improved manageability* - Administrators can more easily search and evaluate permissions when building a permission model for realm resources. +* *Resource-Specific and Global Permissions* – Permissions can be defined for individual resources (such as specific users or groups), or entire resource types (such as all users or all groups). +* *Explicit Operation Scoping* – Permissions are now independent, removing hidden dependencies between operations. Administrators must assign each scope explicitly, making it easier to see what is granted without needing prior knowledge of implicit relationships. +* *Per-Realm Enablement* – Fine-Grained Admin Permissions can be enabled on a per-realm basis, allowing greater control over adoption and configuration. + +For more details, see link:{adminguide_finegrained_link}[{adminguide_finegrained_name}]. + +For more information about migration, see the link:{upgradingguide_link}[{upgradingguide_name}]. + += Guides for metrics and Grafana dashboards + +In addition to the list of useful metric names link:{observablitycategory_link}[the Observability guides category] now also contains a guide on how to display these metrics in Grafana. +link:{grafanadashboards_link}[The guide] contains two dashboards. + +* Keycloak troubleshooting dashboard - showing metrics related to service level indicators and troubleshooting. +* Keycloak capacity planning dashboard - showing metrics related to estimating the load handled by Keycloak. + += Zero-configuration secure cluster communication + +For clustering multiple nodes, {project_name} uses distributed caches. +Starting with this release for all TCP-based transport stacks, the communication between the nodes is encrypted with TLS and secured with automatically generated ephemeral keys and certificates. + +This strengthens a secure-by-default setup and minimizes the configuration steps of new setups. 
+ +For more information, check the link:https://www.keycloak.org/server/caching#_securing_transport_stacks[Securing Transport Stacks] in the distributed caches guide. + += Rolling updates for optimized and customized images + +When using an optimized or customized image, the {project_name} Operator can now perform a rolling update for a new image if the old and the new image contain the same version of {project_name}. +This is helpful when you want to roll out, for example, an updated theme or provider without downtime. + +To use the functionality in the Operator, enable the `Auto` update strategy, and on an image change the {project_name} Operator will briefly start up the old and the new image to determine if a rolling update without downtime is possible. +Read the section https://www.keycloak.org/operator/rolling-updates[Managing Rolling Updates] in the {project_name} Operator Advanced Configuration guide for more details on this functionality. + +The checks to determine if a rolling update is possible are also available on the {project_name} command line so you can use them in your deployment pipeline. Continue reading in the https://www.keycloak.org/server/update-compatibility[Update Compatibility Tool] guide for more information about the functionality available on the command line. + += Metrics on user activities + +Event metrics provide admins with an aggregated view of the different user activities in a Keycloak instance. +For now, only metrics for user events are captured. For example, you can monitor the number of logins, login failures, or token refreshes performed. + +While this was a preview feature in 26.1, it is now fully supported in 26.2. + +ifeval::[{project_community}==true] +Many thanks to https://github.com/bohmber[Bernd Bohmann] for the contribution. +endif::[] + +For more information, check the link:https://www.keycloak.org/observability/event-metrics[Monitoring user activities with event metrics] {section}. + += Additional query parameters in Admin Events API + +The Admin Events API now supports filtering for events based on Epoch timestamps in addition to the previous +`yyyy-MM-dd` format. This provides more fine-grained control of the window of events to retrieve. + +A `direction` query parameter was also added, allowing control over the order of returned items as `asc` or +`desc`. In the past, the events were always returned in `desc` order (most recent events first). + +Finally, the returned event representations now also include the `id`, which provides a unique identifier for +an event. (An example request combining these parameters appears below.) + += Logs support ECS format + +All available log handlers now support the *ECS* (Elastic Common Schema) JSON format. +It helps to improve {project_name}'s observability story and centralized logging. + +For more details, see the https://www.keycloak.org/server/logging[Logging guide]. + += New cache for CRLs loaded for the X.509 authenticator + +The Certificate Revocation Lists (CRLs) that are used to validate certificates in the X.509 authenticator are now cached inside a new Infinispan cache called `crl`. Caching improves validation performance and decreases memory consumption because just one CRL is maintained per source. + +Check the `crl-storage` section in the link:https://www.keycloak.org/server/all-provider-config[All provider configuration] {section} to learn about the options for the new cache provider.
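As a concrete illustration of the Admin Events API filtering described above, the request below combines an epoch `dateFrom` value with the new `direction` parameter. The host name, realm name, bearer token, and the choice of seconds for the epoch value are assumptions made for this sketch, not part of the release notes:

[source,bash]
----
# List admin events of the assumed realm "myrealm", oldest first,
# starting from an epoch timestamp (unit assumed to be seconds here).
curl -s \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  "https://keycloak.example.com/admin/realms/myrealm/admin-events?dateFrom=1735689600&direction=asc&max=50"
----

Because each returned representation now also includes the `id` field, a client can use it to de-duplicate events when paging through results.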
+ += Operator creates NetworkPolicies to restrict traffic + +The {project_name} Operator now creates a NetworkPolicy by default to restrict traffic to the internal ports used for {project_name}'s distributed caches. + +This strengthens a secure-by-default setup and minimizes the configuration steps of new setups. + +You can further restrict access to the management and HTTP endpoints using the Kubernetes NetworkPolicies rule syntax. + +Read more about this in the https://www.keycloak.org/operator/advanced-configuration[Operator Advanced configuration]. + += Option to reload trust and key material for the management interface + +The `https-management-certificates-reload-period` option can be set to define the reloading period of key store, trust store, and certificate files referenced by `https-management-*` options for the management interface. +Use -1 to disable reloading. Defaults to `https-certificates-reload-period`, which defaults to 1h (one hour). + +For more information, check the link:https://www.keycloak.org/server/management-interface#_tls_support[Configuring the Management Interface] guide. + += Dynamic Authentication Flow selection using Client Policies + +This release introduces the ability to dynamically select authentication flows based on conditions such as requested scopes, ACR (Authentication Context Class Reference), and others. +This can be achieved using link:{adminguide_link}#_client_policies[Client Policies] by combining the new `AuthenticationFlowSelectorExecutor` with conditions like the new `ACRCondition`. For more details, see the link:{adminguide_link}#_client-policy-auth-flow[{adminguide_name}]. + += JWT Client authentication aligned with the latest OIDC specification + +The latest version of the link:https://openid.net/specs/openid-connect-core-1_0-36.html#rfc.section.9[OpenID Connect Core Specification] tightened the rules for +audience validation in JWT client assertions for the Client Authentication methods `private_key_jwt` and `client_secret_jwt`. {project_name} now enforces by default that there is a single audience +in the JWT token used for client authentication. + +For information on how the audience validation in JWT client authentication changed compared to previous {project_name} versions, see the link:{upgradingguide_link}[{upgradingguide_name}]. + +ifeval::[{project_community}==true] +Many thanks to https://github.com/thomasdarimont[Thomas Darimont] for the contribution. +endif::[] + += Federated credentials are available now when fetching user credentials + +Until now, querying user credentials using the User API would not return credentials managed by user storage providers and, as a consequence, +prevented fetching additional metadata associated with federated credentials, such as the last time a credential was updated. + +In this release, we are adding a new method `getCredentials(RealmModel, UserModel)` to the `org.keycloak.credential.CredentialInputUpdater` interface so that +user storage providers can return the credentials they manage for a specific user in a realm. By doing this, user storage providers can indicate +whether a credential is linked to the provider, as well as supply additional metadata, so that more information can be shown when managing users through the administration console. + +For LDAP, it should now be possible to see the last time the password was updated based on the standard `pwdChangedTime` attribute or, if +using Microsoft AD, based on the `pwdLastSet` attribute.
+ +In order to check if a credential is local - managed by {project_name} - or federated, you can check the `federationLink` property available from both +`CredentialRepresentation` and `CredentialModel` types. If set, the `federationLink` property holds the UUID of the component model associated with a given +user storage provider. + += Token based authentication for SMTP (XOAUTH2) + +The Keycloak outgoing link:{adminguide_email_link}[SMTP mail configuration] now supports token authentication (XOAUTH2). +Many service providers (Microsoft, Google) are moving towards SMTP OAuth authentication and end the support for basic authentication. +The token is gathered using Client Credentials Grant. + +ifeval::[{project_community}==true] +Many thanks to https://github.com/srose[Sebastian Rose] for the contribution. +endif::[] + += New client configuration for access token header type + +A new admin setting has been added: Clients -> Advanced -> Fine grain OpenID Connect configuration -> Use "at+jwt" as access token header type + +If enabled, access tokens will get header type `at+jwt` in compliance with https://datatracker.ietf.org/doc/html/rfc9068#section-2.1[rfc9068#section-2.1]. Otherwise, the access token header type will be `JWT`. + +This setting is turned off by default. + +ifeval::[{project_community}==true] +Many thanks to https://github.com/laurids[Laurids Møller Jepsen] for the contribution. +endif::[] + +ifeval::[{project_community}==true] += OpenID for Verifiable Credential Issuance documentation + +The OpenID for Verifiable Credential Issuance (OID4VCI) remains an experimental feature in {project_name}, but it received further improvements and especially the link:{adminguide_link}#_oid4vci[The documentation], +with the steps how to try this feature. + +You will find significant development and discussions in the https://github.com/keycloak/kc-sig-fapi[Keycloak OAuth SIG]. Anyone from the Keycloak community is welcome to join and provide the feedback. + +Many thanks to all members of the OAuth SIG group for the participation in the development and discussions about this feature. Especially thanks to +https://github.com/Awambeng[Awambeng Rodrick] and https://github.com/IngridPuppet[Ingrid Kamga]. +endif::[] diff --git a/docs/documentation/release_notes/topics/26_3_0.adoc b/docs/documentation/release_notes/topics/26_3_0.adoc new file mode 100644 index 000000000000..4a725e99e73e --- /dev/null +++ b/docs/documentation/release_notes/topics/26_3_0.adoc @@ -0,0 +1,88 @@ +// Release notes should contain only headline-worthy new features, +// assuming that people who migrate will read the upgrading guide anyway. + +This release delivers advancements to optimize your system and improve the experience of users, developers and administrators: + +* *Account recovery* with 2FA recovery codes, protecting users from lockout. +* Simplified experiences for application developers with *streamlined WebAuthn/Passkey registration* and *simplified account linking* to identity providers via application initiated actions. +* Broader connectivity with the ability to *broker with any OAuth 2.0 compliant authorization server*, and enhanced *trusted email verification* for OpenID Connect providers. +* *Asynchronous logging* for higher throughput and lower latency, ensuring more efficient deployments. +* For administrators, *experimental rolling updates for patch releases* mean minimized downtime and smoother upgrades. 
+ +Read on to learn more about each new feature, and https://www.keycloak.org/docs/latest/upgrading/index.html[find additional details in the upgrading guide] if you are upgrading from a previous release of {project_name}. + += Recovering your account if you lose your 2FA credentials + +When using, for example, a one-time password (OTP) generator as a second factor for authenticating users (2FA), a user can get locked out of their account if they lose the phone that contains the OTP generator. +To prepare for such a case, the recovery codes feature allows users to print a set of recovery codes as an additional second factor. +If the recovery codes are then allowed as an alternative 2FA method in the login flow, they can be used instead of the OTP-generated passwords. + +With this release, the recovery codes feature is promoted from preview to a supported feature. +For newly created realms, the browser flow now includes the Recovery Authentication Code Form as _Disabled_, and it can be switched to _Alternative_ by admins if they want to use this feature. + +For more information about this 2FA method, see the link:{adminguide_link}#_recovery-codes[Recovery Codes] chapter in the {adminguide_name}. + += Performance improvements to import, export and migration + +The time it takes to run imports, exports, or migrations involving a large number of realms has been improved. There is no longer a cumulative performance degradation for each additional realm processed. + += Simplified registration for WebAuthn and Passkeys + +Both WebAuthn Register actions (`webauthn-register` and `webauthn-register-passwordless`), which are also used for Passkeys, now support a parameter `skip_if_exists` when initiated by the application (AIA). + +This should make it more convenient to use the AIA in scenarios where a user has already set up WebAuthn or Passkeys. +The parameter allows skipping the action if the user already has a credential of that type. + +For more information, see the link:{adminguide_link}#_webauthn_aia[Registering WebAuthn credentials using AIA] chapter in the {adminguide_name}. + += Simplified linking of the user account to an identity provider + +Client-initiated linking of a user account to an identity provider is now based on the application-initiated action (AIA) implementation. +This aligns how the functionality is configured and simplifies error handling for the calling client application, +making it more useful for a broader audience. + +The custom protocol, which was previously used for client-initiated account linking, is now deprecated. + += Brokering with OAuth v2 compliant authorization servers + +In previous releases, {project_name} already supported federation with other OpenID Connect and SAML providers, as well as with several Social Providers, such as GitHub and Google, which are based on OAuth 2.0. + +The new OAuth 2.0 broker now closes the gap to federate with any OAuth 2.0 provider. +This allows you to federate, for example, with Amazon or other providers. +As this is a generic provider, you will need to specify the different claims and a user info endpoint in the provider's configuration. + +For more information, see the link:{adminguide_link}#_identity_broker_oauth[OAuth v2 identity providers] chapter in the {adminguide_name}. + += Trusted email verification when brokering OpenID Connect Providers + +Until now, the OpenID Connect broker did not support the standard `email_verified` claim available from the ID Tokens issued by OpenID Connect Providers.
+ +Starting with this release, {project_name} supports this standard claim as defined by the https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims[OpenID Connect Core Specification] for federation. + +Whenever users are federated for the first time or re-authenticating and if the *Trust email* setting is enabled, *Sync Mode* is set to `FORCE` and the provider sends the `email_verified` claim, the user account will have their email marked according to the `email_verified` claim. +If the provider does not send the claim, it defaults to the original behavior and sets the email as verified. + += Asynchronous logging for higher throughput and lower latency + +All available log handlers now support asynchronous logging capabilities. +Asynchronous logging helps deployments that require high throughput and low latency. + +For more details on this opt-in feature, see the https://www.keycloak.org/server/logging[Logging guide]. + += Rolling updates for patch releases for minimized downtime (preview) + +In the previous release, the Keycloak Operator was enhanced to support performing rolling updates of the Keycloak image if both images contain the same version. +This is useful, for example, when switching to an optimized image, changing a theme or a provider source code. + +In this release, we extended this to perform rolling update when the new image contains a future patch release from the same `major.minor` release stream as a preview feature. +This can reduce the service's downtime even further, as downtime is only needed when upgrading from a different minor or major version. + +Read more on how to enable this feature in https://www.keycloak.org/server/update-compatibility#rolling-updates-for-patch-releases[update compatibility command]. + += Passkeys integrated in the default username forms + +In this release {project_name} integrates *Passkeys* in the default authentications forms. A new switch *Enable Passkeys* is available in the configuration, *Authentication* → *Policies* → *Webauthn Passwordless Policy*, that seamlessly incorporates passkeys support to the realm. With just one click, {project_name} offers conditional and modal user interfaces in the default login forms to allow users to authenticate with a passkey. + +The *Passkeys* feature is still in preview. Follow the https://www.keycloak.org/server/features[Enabling and disabling features] {section} to enable it. + +For more information, see link:{adminguide_link}#passkeys_server_administration_guide[Passkeys section in the {adminguide_name}]. diff --git a/docs/documentation/release_notes/topics/26_4_0.adoc b/docs/documentation/release_notes/topics/26_4_0.adoc new file mode 100644 index 000000000000..da03935762cf --- /dev/null +++ b/docs/documentation/release_notes/topics/26_4_0.adoc @@ -0,0 +1,8 @@ +// Release notes should contain only headline-worthy new features, +// assuming that people who migrate will read the upgrading guide anyway. + +Read on to learn more about each new feature, and https://www.keycloak.org/docs/latest/upgrading/index.html[find additional details in the upgrading guide] if you are upgrading from a previous release of {project_name}. + += Option to force management interface to use HTTP. + +There's a new option `http-management-scheme` that may be set to `http` to force the management interface to use HTTP rather than inheriting the HTTPS settings of the main interface. 
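A minimal sketch of how this option might be combined with an otherwise HTTPS-enabled server, assuming the option is exposed on the command line in the usual `--option-name` form and using placeholder certificate paths:

[source,bash]
----
# Keep HTTPS on the main interface, but serve the management interface
# (for example health and metrics endpoints) over plain HTTP.
bin/kc.sh start \
  --https-certificate-file=/path/to/tls.crt \
  --https-certificate-key-file=/path/to/tls.key \
  --http-management-scheme=http
----

This can be useful when TLS for the management port is terminated elsewhere, for example by an in-cluster proxy or scraper.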
diff --git a/docs/documentation/release_notes/topics/template.adoc b/docs/documentation/release_notes/topics/template.adoc new file mode 100644 index 000000000000..d7b239ac2ddf --- /dev/null +++ b/docs/documentation/release_notes/topics/template.adoc @@ -0,0 +1,4 @@ +// Release notes should contain only headline-worthy new features, +// assuming that people who migrate will read the upgrading guide anyway. + += diff --git a/docs/documentation/server_admin/images/Create-top-level-flow.png b/docs/documentation/server_admin/images/Create-top-level-flow.png index c3bc2f0be9b3..6e605d09e68c 100644 Binary files a/docs/documentation/server_admin/images/Create-top-level-flow.png and b/docs/documentation/server_admin/images/Create-top-level-flow.png differ diff --git a/docs/documentation/server_admin/images/New-flow.png b/docs/documentation/server_admin/images/New-flow.png index 2b1ab2883595..2246fe0ad14d 100644 Binary files a/docs/documentation/server_admin/images/New-flow.png and b/docs/documentation/server_admin/images/New-flow.png differ diff --git a/docs/documentation/server_admin/images/add-client-oidc.png b/docs/documentation/server_admin/images/add-client-oidc.png index 8495d76e0af7..b6b77f142608 100644 Binary files a/docs/documentation/server_admin/images/add-client-oidc.png and b/docs/documentation/server_admin/images/add-client-oidc.png differ diff --git a/docs/documentation/server_admin/images/add-client-saml.png b/docs/documentation/server_admin/images/add-client-saml.png index 62d903cabd8c..1883c9b5a50d 100644 Binary files a/docs/documentation/server_admin/images/add-client-saml.png and b/docs/documentation/server_admin/images/add-client-saml.png differ diff --git a/docs/documentation/server_admin/images/add-identity-provider.png b/docs/documentation/server_admin/images/add-identity-provider.png index 1c9a72b103e7..793dcc86f4d1 100644 Binary files a/docs/documentation/server_admin/images/add-identity-provider.png and b/docs/documentation/server_admin/images/add-identity-provider.png differ diff --git a/docs/documentation/server_admin/images/add-mapper.png b/docs/documentation/server_admin/images/add-mapper.png index ac02689ec8ac..6c31a0c4908a 100644 Binary files a/docs/documentation/server_admin/images/add-mapper.png and b/docs/documentation/server_admin/images/add-mapper.png differ diff --git a/docs/documentation/server_admin/images/add-realm-menu.png b/docs/documentation/server_admin/images/add-realm-menu.png deleted file mode 100644 index cf8a82cd7354..000000000000 Binary files a/docs/documentation/server_admin/images/add-realm-menu.png and /dev/null differ diff --git a/docs/documentation/server_admin/images/admin-console.png b/docs/documentation/server_admin/images/admin-console.png index 69e7a1d47be4..267f7db107a1 100644 Binary files a/docs/documentation/server_admin/images/admin-console.png and b/docs/documentation/server_admin/images/admin-console.png differ diff --git a/docs/documentation/server_admin/images/audience_mapper.png b/docs/documentation/server_admin/images/audience_mapper.png index ee4b8cf67e5b..7625d7d7614a 100644 Binary files a/docs/documentation/server_admin/images/audience_mapper.png and b/docs/documentation/server_admin/images/audience_mapper.png differ diff --git a/docs/documentation/server_admin/images/audience_resolving_evaluate.png b/docs/documentation/server_admin/images/audience_resolving_evaluate.png new file mode 100644 index 000000000000..4be5084ccee9 Binary files /dev/null and b/docs/documentation/server_admin/images/audience_resolving_evaluate.png 
differ diff --git a/docs/documentation/server_admin/images/browser-flow.png b/docs/documentation/server_admin/images/browser-flow.png index 8ca5bf786b69..53454e4660df 100644 Binary files a/docs/documentation/server_admin/images/browser-flow.png and b/docs/documentation/server_admin/images/browser-flow.png differ diff --git a/docs/documentation/server_admin/images/brute-force-mixed.png b/docs/documentation/server_admin/images/brute-force-mixed.png index 3099c5683055..a3c31d37789a 100644 Binary files a/docs/documentation/server_admin/images/brute-force-mixed.png and b/docs/documentation/server_admin/images/brute-force-mixed.png differ diff --git a/docs/documentation/server_admin/images/brute-force-permanently.png b/docs/documentation/server_admin/images/brute-force-permanently.png index 6108f736bc25..f77a1018626a 100644 Binary files a/docs/documentation/server_admin/images/brute-force-permanently.png and b/docs/documentation/server_admin/images/brute-force-permanently.png differ diff --git a/docs/documentation/server_admin/images/brute-force-temporarily.png b/docs/documentation/server_admin/images/brute-force-temporarily.png index 19f588183f3c..291c1b5bcc16 100644 Binary files a/docs/documentation/server_admin/images/brute-force-temporarily.png and b/docs/documentation/server_admin/images/brute-force-temporarily.png differ diff --git a/docs/documentation/server_admin/images/brute-force.png b/docs/documentation/server_admin/images/brute-force.png index 74abfa3cbdcf..c9e2dcd4c7c1 100644 Binary files a/docs/documentation/server_admin/images/brute-force.png and b/docs/documentation/server_admin/images/brute-force.png differ diff --git a/docs/documentation/server_admin/images/client-credentials-jwt.png b/docs/documentation/server_admin/images/client-credentials-jwt.png index da328b5fe71b..fc9148614c68 100644 Binary files a/docs/documentation/server_admin/images/client-credentials-jwt.png and b/docs/documentation/server_admin/images/client-credentials-jwt.png differ diff --git a/docs/documentation/server_admin/images/client-credentials.png b/docs/documentation/server_admin/images/client-credentials.png index 07fbf857238a..abbeea2450bb 100644 Binary files a/docs/documentation/server_admin/images/client-credentials.png and b/docs/documentation/server_admin/images/client-credentials.png differ diff --git a/docs/documentation/server_admin/images/client-oidc-keys.png b/docs/documentation/server_admin/images/client-oidc-keys.png index 4f0cd91b713a..bf9b4c7ad824 100644 Binary files a/docs/documentation/server_admin/images/client-oidc-keys.png and b/docs/documentation/server_admin/images/client-oidc-keys.png differ diff --git a/docs/documentation/server_admin/images/client-scope.png b/docs/documentation/server_admin/images/client-scope.png index 74983834b0e4..d71c323d0a6e 100644 Binary files a/docs/documentation/server_admin/images/client-scope.png and b/docs/documentation/server_admin/images/client-scope.png differ diff --git a/docs/documentation/server_admin/images/client-scopes-evaluate.png b/docs/documentation/server_admin/images/client-scopes-evaluate.png index 24495a3de315..eacbbc490620 100644 Binary files a/docs/documentation/server_admin/images/client-scopes-evaluate.png and b/docs/documentation/server_admin/images/client-scopes-evaluate.png differ diff --git a/docs/documentation/server_admin/images/client-scopes-list.png b/docs/documentation/server_admin/images/client-scopes-list.png index 58cd44011632..026d58d3a2e8 100644 Binary files 
a/docs/documentation/server_admin/images/client-scopes-list.png and b/docs/documentation/server_admin/images/client-scopes-list.png differ diff --git a/docs/documentation/server_admin/images/client-scopes-phone.png b/docs/documentation/server_admin/images/client-scopes-phone.png index 4a315c36ed5c..107e1fc9b3f6 100644 Binary files a/docs/documentation/server_admin/images/client-scopes-phone.png and b/docs/documentation/server_admin/images/client-scopes-phone.png differ diff --git a/docs/documentation/server_admin/images/client-sessions.png b/docs/documentation/server_admin/images/client-sessions.png index 5fbfc298f024..f156627c0ebd 100644 Binary files a/docs/documentation/server_admin/images/client-sessions.png and b/docs/documentation/server_admin/images/client-sessions.png differ diff --git a/docs/documentation/server_admin/images/client-settings-oidc.png b/docs/documentation/server_admin/images/client-settings-oidc.png index 3319850eb422..ae624a81d683 100644 Binary files a/docs/documentation/server_admin/images/client-settings-oidc.png and b/docs/documentation/server_admin/images/client-settings-oidc.png differ diff --git a/docs/documentation/server_admin/images/client-settings-saml.png b/docs/documentation/server_admin/images/client-settings-saml.png index 7309e740c5d0..e010073014e2 100644 Binary files a/docs/documentation/server_admin/images/client-settings-saml.png and b/docs/documentation/server_admin/images/client-settings-saml.png differ diff --git a/docs/documentation/server_admin/images/create-oidc-client-profile.png b/docs/documentation/server_admin/images/create-oidc-client-profile.png index f84d3e4ca624..f1443d9beba9 100644 Binary files a/docs/documentation/server_admin/images/create-oidc-client-profile.png and b/docs/documentation/server_admin/images/create-oidc-client-profile.png differ diff --git a/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-condition.png b/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-condition.png index d894fef87406..74f8c7a5209d 100644 Binary files a/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-condition.png and b/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-condition.png differ diff --git a/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-executor.png b/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-executor.png index d99d1292a8ac..fb2d5524f732 100644 Binary files a/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-executor.png and b/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-executor.png differ diff --git a/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-policy.png b/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-policy.png index 8673a818779c..f1817b9e8139 100644 Binary files a/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-policy.png and b/docs/documentation/server_admin/images/create-oidc-client-secret-rotation-policy.png differ diff --git a/docs/documentation/server_admin/images/create-permission.png b/docs/documentation/server_admin/images/create-permission.png new file mode 100644 index 000000000000..db6ca708fc8f Binary files /dev/null and b/docs/documentation/server_admin/images/create-permission.png differ diff --git a/docs/documentation/server_admin/images/default-groups.png b/docs/documentation/server_admin/images/default-groups.png index 
7f7baca6975e..a4530b5b2e56 100644 Binary files a/docs/documentation/server_admin/images/default-groups.png and b/docs/documentation/server_admin/images/default-groups.png differ diff --git a/docs/documentation/server_admin/images/default-roles.png b/docs/documentation/server_admin/images/default-roles.png index 7aea18c8d6c7..716fb1e19f6f 100644 Binary files a/docs/documentation/server_admin/images/default-roles.png and b/docs/documentation/server_admin/images/default-roles.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-client-assign-user-policy.png b/docs/documentation/server_admin/images/fine-grain-client-assign-user-policy.png index b8e367c21d97..57f445f0161c 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-client-assign-user-policy.png and b/docs/documentation/server_admin/images/fine-grain-client-assign-user-policy.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-client-manage-permissions.png b/docs/documentation/server_admin/images/fine-grain-client-manage-permissions.png index 96238bed439c..6fd94a6b750d 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-client-manage-permissions.png and b/docs/documentation/server_admin/images/fine-grain-client-manage-permissions.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-client-permissions-tab-off.png b/docs/documentation/server_admin/images/fine-grain-client-permissions-tab-off.png index 0caa7040e9c3..3e0174b16b0e 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-client-permissions-tab-off.png and b/docs/documentation/server_admin/images/fine-grain-client-permissions-tab-off.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-client-permissions-tab-on.png b/docs/documentation/server_admin/images/fine-grain-client-permissions-tab-on.png index 33e12727209f..22c77a988542 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-client-permissions-tab-on.png and b/docs/documentation/server_admin/images/fine-grain-client-permissions-tab-on.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-client-user-policy.png b/docs/documentation/server_admin/images/fine-grain-client-user-policy.png index 3861ea359d1b..cd2e55782362 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-client-user-policy.png and b/docs/documentation/server_admin/images/fine-grain-client-user-policy.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-client.png b/docs/documentation/server_admin/images/fine-grain-client.png index fa9fbbc1ef4c..f6f7b5ecd320 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-client.png and b/docs/documentation/server_admin/images/fine-grain-client.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-enable.png b/docs/documentation/server_admin/images/fine-grain-enable.png new file mode 100644 index 000000000000..4e0b705d059d Binary files /dev/null and b/docs/documentation/server_admin/images/fine-grain-enable.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-evaluation.png b/docs/documentation/server_admin/images/fine-grain-evaluation.png new file mode 100644 index 000000000000..959efa30f861 Binary files /dev/null and b/docs/documentation/server_admin/images/fine-grain-evaluation.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-map-roles-permission.png 
b/docs/documentation/server_admin/images/fine-grain-map-roles-permission.png index ea1ca3b84831..75b28802fe41 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-map-roles-permission.png and b/docs/documentation/server_admin/images/fine-grain-map-roles-permission.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-permissions-tab.png b/docs/documentation/server_admin/images/fine-grain-permissions-tab.png new file mode 100644 index 000000000000..1cce9b58ddfd Binary files /dev/null and b/docs/documentation/server_admin/images/fine-grain-permissions-tab.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-sales-admin-login.png b/docs/documentation/server_admin/images/fine-grain-sales-admin-login.png index 0468d7275496..b521f38d4bdb 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-sales-admin-login.png and b/docs/documentation/server_admin/images/fine-grain-sales-admin-login.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-sales-application-roles.png b/docs/documentation/server_admin/images/fine-grain-sales-application-roles.png index bf7c132bbef9..43cc8fffda3b 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-sales-application-roles.png and b/docs/documentation/server_admin/images/fine-grain-sales-application-roles.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-search.png b/docs/documentation/server_admin/images/fine-grain-search.png new file mode 100644 index 000000000000..807365c36e88 Binary files /dev/null and b/docs/documentation/server_admin/images/fine-grain-search.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-users-permissions.png b/docs/documentation/server_admin/images/fine-grain-users-permissions.png index 3a2e1f2e2d50..7aa5dc5ee298 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-users-permissions.png and b/docs/documentation/server_admin/images/fine-grain-users-permissions.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-view-leads-permissions.png b/docs/documentation/server_admin/images/fine-grain-view-leads-permissions.png index 3364f6514116..93c4f4a9e6be 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-view-leads-permissions.png and b/docs/documentation/server_admin/images/fine-grain-view-leads-permissions.png differ diff --git a/docs/documentation/server_admin/images/fine-grain-view-leads-role-tab.png b/docs/documentation/server_admin/images/fine-grain-view-leads-role-tab.png index fa0628ac33ee..b0b0890d2c6c 100644 Binary files a/docs/documentation/server_admin/images/fine-grain-view-leads-role-tab.png and b/docs/documentation/server_admin/images/fine-grain-view-leads-role-tab.png differ diff --git a/docs/documentation/server_admin/images/full-client-scope.png b/docs/documentation/server_admin/images/full-client-scope.png index 067058ca9599..d2584c747f78 100644 Binary files a/docs/documentation/server_admin/images/full-client-scope.png and b/docs/documentation/server_admin/images/full-client-scope.png differ diff --git a/docs/documentation/server_admin/images/group-membership.png b/docs/documentation/server_admin/images/group-membership.png index 663f82ec5e31..f0131089c8ea 100644 Binary files a/docs/documentation/server_admin/images/group-membership.png and b/docs/documentation/server_admin/images/group-membership.png differ diff --git a/docs/documentation/server_admin/images/group.png 
b/docs/documentation/server_admin/images/group.png index 73b394aa085b..09dd49b08252 100644 Binary files a/docs/documentation/server_admin/images/group.png and b/docs/documentation/server_admin/images/group.png differ diff --git a/docs/documentation/server_admin/images/groups.png b/docs/documentation/server_admin/images/groups.png index 966882a685bd..49659c5a76a3 100644 Binary files a/docs/documentation/server_admin/images/groups.png and b/docs/documentation/server_admin/images/groups.png differ diff --git a/docs/documentation/server_admin/images/identity-provider-mapper.png b/docs/documentation/server_admin/images/identity-provider-mapper.png index 413d9f7ab994..ab0b39a62936 100644 Binary files a/docs/documentation/server_admin/images/identity-provider-mapper.png and b/docs/documentation/server_admin/images/identity-provider-mapper.png differ diff --git a/docs/documentation/server_admin/images/identity-provider-mappers.png b/docs/documentation/server_admin/images/identity-provider-mappers.png index 9d2614d341a1..8236ee35e979 100644 Binary files a/docs/documentation/server_admin/images/identity-provider-mappers.png and b/docs/documentation/server_admin/images/identity-provider-mappers.png differ diff --git a/docs/documentation/server_admin/images/identity-providers.png b/docs/documentation/server_admin/images/identity-providers.png index af7d28ef1427..372dcc194111 100644 Binary files a/docs/documentation/server_admin/images/identity-providers.png and b/docs/documentation/server_admin/images/identity-providers.png differ diff --git a/docs/documentation/server_admin/images/import-client-saml.png b/docs/documentation/server_admin/images/import-client-saml.png index 49f80ebb9198..86984ec0c230 100644 Binary files a/docs/documentation/server_admin/images/import-client-saml.png and b/docs/documentation/server_admin/images/import-client-saml.png differ diff --git a/docs/documentation/server_admin/images/initial-welcome-page.png b/docs/documentation/server_admin/images/initial-welcome-page.png index 4674c5224305..5338f5a7c9c7 100644 Binary files a/docs/documentation/server_admin/images/initial-welcome-page.png and b/docs/documentation/server_admin/images/initial-welcome-page.png differ diff --git a/docs/documentation/server_admin/images/kerberos-provider.png b/docs/documentation/server_admin/images/kerberos-provider.png index b352d86f3536..48d588835b4d 100644 Binary files a/docs/documentation/server_admin/images/kerberos-provider.png and b/docs/documentation/server_admin/images/kerberos-provider.png differ diff --git a/docs/documentation/server_admin/images/login-page.png b/docs/documentation/server_admin/images/login-page.png index 8f28b18bddd9..1b65ac873df1 100644 Binary files a/docs/documentation/server_admin/images/login-page.png and b/docs/documentation/server_admin/images/login-page.png differ diff --git a/docs/documentation/server_admin/images/login-tab.png b/docs/documentation/server_admin/images/login-tab.png index 823643763b02..66ca105e0b3e 100644 Binary files a/docs/documentation/server_admin/images/login-tab.png and b/docs/documentation/server_admin/images/login-tab.png differ diff --git a/docs/documentation/server_admin/images/mapper-config.png b/docs/documentation/server_admin/images/mapper-config.png index 5710a10bf49e..4721663ae72e 100644 Binary files a/docs/documentation/server_admin/images/mapper-config.png and b/docs/documentation/server_admin/images/mapper-config.png differ diff --git a/docs/documentation/server_admin/images/mapper-oidc-client-roles.png 
b/docs/documentation/server_admin/images/mapper-oidc-client-roles.png new file mode 100644 index 000000000000..18039bb13a59 Binary files /dev/null and b/docs/documentation/server_admin/images/mapper-oidc-client-roles.png differ diff --git a/docs/documentation/server_admin/images/mapper-oidc-realm-roles.png b/docs/documentation/server_admin/images/mapper-oidc-realm-roles.png new file mode 100644 index 000000000000..41e16d521195 Binary files /dev/null and b/docs/documentation/server_admin/images/mapper-oidc-realm-roles.png differ diff --git a/docs/documentation/server_admin/images/mappers-oidc.png b/docs/documentation/server_admin/images/mappers-oidc.png index 7f6b9e954841..2118257952ee 100644 Binary files a/docs/documentation/server_admin/images/mappers-oidc.png and b/docs/documentation/server_admin/images/mappers-oidc.png differ diff --git a/docs/documentation/server_admin/images/oidc-add-identity-provider.png b/docs/documentation/server_admin/images/oidc-add-identity-provider.png index 27553eaf99dd..7a5719848418 100644 Binary files a/docs/documentation/server_admin/images/oidc-add-identity-provider.png and b/docs/documentation/server_admin/images/oidc-add-identity-provider.png differ diff --git a/docs/documentation/server_admin/images/oidc-client-secret-rotation-policy.png b/docs/documentation/server_admin/images/oidc-client-secret-rotation-policy.png index a056c6f75499..1daa54b45682 100644 Binary files a/docs/documentation/server_admin/images/oidc-client-secret-rotation-policy.png and b/docs/documentation/server_admin/images/oidc-client-secret-rotation-policy.png differ diff --git a/docs/documentation/server_admin/images/organizations-add-org-attrs-in-claim.png b/docs/documentation/server_admin/images/organizations-add-org-attrs-in-claim.png index c1184f9d5aa1..17bd74e222f3 100644 Binary files a/docs/documentation/server_admin/images/organizations-add-org-attrs-in-claim.png and b/docs/documentation/server_admin/images/organizations-add-org-attrs-in-claim.png differ diff --git a/docs/documentation/server_admin/images/organizations-browser-flow.png b/docs/documentation/server_admin/images/organizations-browser-flow.png index 7a9a1625d4a2..afcdba2ebaab 100644 Binary files a/docs/documentation/server_admin/images/organizations-browser-flow.png and b/docs/documentation/server_admin/images/organizations-browser-flow.png differ diff --git a/docs/documentation/server_admin/images/organizations-create-org.png b/docs/documentation/server_admin/images/organizations-create-org.png index e5909eabe272..fe5c72034ef0 100644 Binary files a/docs/documentation/server_admin/images/organizations-create-org.png and b/docs/documentation/server_admin/images/organizations-create-org.png differ diff --git a/docs/documentation/server_admin/images/organizations-edit-identity-provider.png b/docs/documentation/server_admin/images/organizations-edit-identity-provider.png index 569c770444be..ac7016d335b0 100644 Binary files a/docs/documentation/server_admin/images/organizations-edit-identity-provider.png and b/docs/documentation/server_admin/images/organizations-edit-identity-provider.png differ diff --git a/docs/documentation/server_admin/images/organizations-enabling-orgs.png b/docs/documentation/server_admin/images/organizations-enabling-orgs.png index cb7caf53efc8..589ea57fd46f 100644 Binary files a/docs/documentation/server_admin/images/organizations-enabling-orgs.png and b/docs/documentation/server_admin/images/organizations-enabling-orgs.png differ diff --git 
a/docs/documentation/server_admin/images/organizations-first-broker-flow.png b/docs/documentation/server_admin/images/organizations-first-broker-flow.png index ec383087ec5c..3a0a53e4a1c1 100644 Binary files a/docs/documentation/server_admin/images/organizations-first-broker-flow.png and b/docs/documentation/server_admin/images/organizations-first-broker-flow.png differ diff --git a/docs/documentation/server_admin/images/organizations-identity-providers.png b/docs/documentation/server_admin/images/organizations-identity-providers.png index 63094944e40c..a1c16f4776b5 100644 Binary files a/docs/documentation/server_admin/images/organizations-identity-providers.png and b/docs/documentation/server_admin/images/organizations-identity-providers.png differ diff --git a/docs/documentation/server_admin/images/organizations-manage-attributes.png b/docs/documentation/server_admin/images/organizations-manage-attributes.png index e19e41a88ef8..25f28f5e0fa1 100644 Binary files a/docs/documentation/server_admin/images/organizations-manage-attributes.png and b/docs/documentation/server_admin/images/organizations-manage-attributes.png differ diff --git a/docs/documentation/server_admin/images/organizations-manage-members.png b/docs/documentation/server_admin/images/organizations-manage-members.png index 6cd20b21995c..b1088b14aa19 100644 Binary files a/docs/documentation/server_admin/images/organizations-manage-members.png and b/docs/documentation/server_admin/images/organizations-manage-members.png differ diff --git a/docs/documentation/server_admin/images/organizations-management-screen.png b/docs/documentation/server_admin/images/organizations-management-screen.png index 6aaf81c45337..fe4fff2c851f 100644 Binary files a/docs/documentation/server_admin/images/organizations-management-screen.png and b/docs/documentation/server_admin/images/organizations-management-screen.png differ diff --git a/docs/documentation/server_admin/images/otp-policy.png b/docs/documentation/server_admin/images/otp-policy.png index b61b8a71f842..c285962542b3 100644 Binary files a/docs/documentation/server_admin/images/otp-policy.png and b/docs/documentation/server_admin/images/otp-policy.png differ diff --git a/docs/documentation/server_admin/images/passkey-conditional-ui-authentication.png b/docs/documentation/server_admin/images/passkey-conditional-ui-authentication.png deleted file mode 100644 index 41046a819568..000000000000 Binary files a/docs/documentation/server_admin/images/passkey-conditional-ui-authentication.png and /dev/null differ diff --git a/docs/documentation/server_admin/images/passkey-conditional-ui-autofill.png b/docs/documentation/server_admin/images/passkey-conditional-ui-autofill.png index 5d488c32bc84..9657ef2c591b 100644 Binary files a/docs/documentation/server_admin/images/passkey-conditional-ui-autofill.png and b/docs/documentation/server_admin/images/passkey-conditional-ui-autofill.png differ diff --git a/docs/documentation/server_admin/images/passkey-modal-ui.png b/docs/documentation/server_admin/images/passkey-modal-ui.png new file mode 100644 index 000000000000..0ea058cf73ae Binary files /dev/null and b/docs/documentation/server_admin/images/passkey-modal-ui.png differ diff --git a/docs/documentation/server_admin/images/password-policy.png b/docs/documentation/server_admin/images/password-policy.png index 8936f4f03747..c8e783266717 100644 Binary files a/docs/documentation/server_admin/images/password-policy.png and b/docs/documentation/server_admin/images/password-policy.png differ diff --git 
a/docs/documentation/server_admin/images/realm-settings.png b/docs/documentation/server_admin/images/realm-settings.png new file mode 100644 index 000000000000..9fb87faf43ba Binary files /dev/null and b/docs/documentation/server_admin/images/realm-settings.png differ diff --git a/docs/documentation/server_admin/images/recovery-codes-account-console-warn.png b/docs/documentation/server_admin/images/recovery-codes-account-console-warn.png new file mode 100644 index 000000000000..31fc185b7263 Binary files /dev/null and b/docs/documentation/server_admin/images/recovery-codes-account-console-warn.png differ diff --git a/docs/documentation/server_admin/images/recovery-codes-browser-flow.png b/docs/documentation/server_admin/images/recovery-codes-browser-flow.png new file mode 100644 index 000000000000..75261b8bf630 Binary files /dev/null and b/docs/documentation/server_admin/images/recovery-codes-browser-flow.png differ diff --git a/docs/documentation/server_admin/images/recovery-codes-setup.png b/docs/documentation/server_admin/images/recovery-codes-setup.png new file mode 100644 index 000000000000..07b1a308938d Binary files /dev/null and b/docs/documentation/server_admin/images/recovery-codes-setup.png differ diff --git a/docs/documentation/server_admin/images/reset-credential-email-config.png b/docs/documentation/server_admin/images/reset-credential-email-config.png new file mode 100644 index 000000000000..be0e6e48a3c5 Binary files /dev/null and b/docs/documentation/server_admin/images/reset-credential-email-config.png differ diff --git a/docs/documentation/server_admin/images/role.png b/docs/documentation/server_admin/images/role.png deleted file mode 100644 index fdd4108c92bd..000000000000 Binary files a/docs/documentation/server_admin/images/role.png and /dev/null differ diff --git a/docs/documentation/server_admin/images/roles.png b/docs/documentation/server_admin/images/roles.png index 487c09eb3d0c..830d63c28767 100644 Binary files a/docs/documentation/server_admin/images/roles.png and b/docs/documentation/server_admin/images/roles.png differ diff --git a/docs/documentation/server_admin/images/saml-add-identity-provider.png b/docs/documentation/server_admin/images/saml-add-identity-provider.png index 895c9a4dcad3..512575c734bc 100644 Binary files a/docs/documentation/server_admin/images/saml-add-identity-provider.png and b/docs/documentation/server_admin/images/saml-add-identity-provider.png differ diff --git a/docs/documentation/server_admin/images/security-headers.png b/docs/documentation/server_admin/images/security-headers.png index d5d1abcc2047..278644259d3f 100644 Binary files a/docs/documentation/server_admin/images/security-headers.png and b/docs/documentation/server_admin/images/security-headers.png differ diff --git a/docs/documentation/server_admin/images/select-policy-type.png b/docs/documentation/server_admin/images/select-policy-type.png new file mode 100644 index 000000000000..9b43e0478024 Binary files /dev/null and b/docs/documentation/server_admin/images/select-policy-type.png differ diff --git a/docs/documentation/server_admin/images/select-resource-type.png b/docs/documentation/server_admin/images/select-resource-type.png new file mode 100644 index 000000000000..14ca9554afcd Binary files /dev/null and b/docs/documentation/server_admin/images/select-resource-type.png differ diff --git a/docs/documentation/server_admin/images/sessions.png b/docs/documentation/server_admin/images/sessions.png index dc1242aa8ba7..1b1b628f5b0d 100644 Binary files 
a/docs/documentation/server_admin/images/sessions.png and b/docs/documentation/server_admin/images/sessions.png differ diff --git a/docs/documentation/server_admin/images/user-fed-ldap.png b/docs/documentation/server_admin/images/user-fed-ldap.png new file mode 100644 index 000000000000..8b36356b9669 Binary files /dev/null and b/docs/documentation/server_admin/images/user-fed-ldap.png differ diff --git a/docs/documentation/server_admin/images/user-federation.png b/docs/documentation/server_admin/images/user-federation.png index 48a0a3ed3086..ed36fd5ecd10 100644 Binary files a/docs/documentation/server_admin/images/user-federation.png and b/docs/documentation/server_admin/images/user-federation.png differ diff --git a/docs/documentation/server_admin/images/user-sessions.png b/docs/documentation/server_admin/images/user-sessions.png index 51b3b4477e17..b3a3ffcf4172 100644 Binary files a/docs/documentation/server_admin/images/user-sessions.png and b/docs/documentation/server_admin/images/user-sessions.png differ diff --git a/docs/documentation/server_admin/images/webauthn-browser-flow-conditional-with-OTP.png b/docs/documentation/server_admin/images/webauthn-browser-flow-conditional-with-OTP.png index f246f1e62ead..1b220d0803d9 100644 Binary files a/docs/documentation/server_admin/images/webauthn-browser-flow-conditional-with-OTP.png and b/docs/documentation/server_admin/images/webauthn-browser-flow-conditional-with-OTP.png differ diff --git a/docs/documentation/server_admin/images/webauthn-browser-flow-conditional.png b/docs/documentation/server_admin/images/webauthn-browser-flow-conditional.png index 55907bf6ca12..143809136a02 100644 Binary files a/docs/documentation/server_admin/images/webauthn-browser-flow-conditional.png and b/docs/documentation/server_admin/images/webauthn-browser-flow-conditional.png differ diff --git a/docs/documentation/server_admin/images/webauthn-browser-flow-required.png b/docs/documentation/server_admin/images/webauthn-browser-flow-required.png index 9ae672a81643..9019b5c15e5f 100644 Binary files a/docs/documentation/server_admin/images/webauthn-browser-flow-required.png and b/docs/documentation/server_admin/images/webauthn-browser-flow-required.png differ diff --git a/docs/documentation/server_admin/images/x509-browser-flow.png b/docs/documentation/server_admin/images/x509-browser-flow.png index c6eaa1c033f9..0e026a37b796 100644 Binary files a/docs/documentation/server_admin/images/x509-browser-flow.png and b/docs/documentation/server_admin/images/x509-browser-flow.png differ diff --git a/docs/documentation/server_admin/images/x509-client-auth.png b/docs/documentation/server_admin/images/x509-client-auth.png index 61896fc70096..db509da01323 100644 Binary files a/docs/documentation/server_admin/images/x509-client-auth.png and b/docs/documentation/server_admin/images/x509-client-auth.png differ diff --git a/docs/documentation/server_admin/topics.adoc b/docs/documentation/server_admin/topics.adoc index 6909103e7544..2cbd1ccc961d 100644 --- a/docs/documentation/server_admin/topics.adoc +++ b/docs/documentation/server_admin/topics.adoc @@ -45,6 +45,7 @@ include::topics/identity-broker/social/paypal.adoc[] include::topics/identity-broker/social/stack-overflow.adoc[] include::topics/identity-broker/social/twitter.adoc[] include::topics/identity-broker/oidc.adoc[] +include::topics/identity-broker/oauth2.adoc[] include::topics/identity-broker/saml.adoc[] include::topics/identity-broker/suggested.adoc[] include::topics/identity-broker/mappers.adoc[] @@ -57,11 +58,15 @@ 
include::topics/sso-protocols.adoc[] include::topics/admin-console-permissions.adoc[] include::topics/admin-console-permissions/master-realm.adoc[] include::topics/admin-console-permissions/per-realm.adoc[] +include::topics/admin-console-permissions/fine-grain-v2.adoc[] ifeval::[{project_community}==true] include::topics/admin-console-permissions/fine-grain.adoc[] endif::[] include::topics/assembly-managing-organizations.adoc[] include::topics/assembly-managing-clients.adoc[] +ifeval::[{project_community}==true] +include::topics/oid4vci/vc-issuer-configuration.adoc[] +endif::[] include::topics/vault.adoc[] include::topics/events.adoc[] include::topics/threat.adoc[] diff --git a/docs/documentation/server_admin/topics/admin-console-permissions.adoc b/docs/documentation/server_admin/topics/admin-console-permissions.adoc index a9f191e062d4..d24a4f2f8d0a 100644 --- a/docs/documentation/server_admin/topics/admin-console-permissions.adoc +++ b/docs/documentation/server_admin/topics/admin-console-permissions.adoc @@ -1,6 +1,6 @@ [[_admin_permissions]] -== Controlling access to the Admin Console +== Managing access to realm resources Each realm created on the {project_name} has a dedicated Admin Console from which that realm can be managed. The `master` realm is a special realm that allows admins to manage more than one realm on the system. diff --git a/docs/documentation/server_admin/topics/admin-console-permissions/fine-grain-v2.adoc b/docs/documentation/server_admin/topics/admin-console-permissions/fine-grain-v2.adoc new file mode 100644 index 000000000000..ae7ec36dac14 --- /dev/null +++ b/docs/documentation/server_admin/topics/admin-console-permissions/fine-grain-v2.adoc @@ -0,0 +1,471 @@ +[[_fine_grained_permissions]] + +=== Delegating realm administration using permissions + +You can delegate realm management to other administrators, the realm administrators, using the fine-grained admin permissions +feature. +Different from the Role-Based Access Control (RBAC) mechanism provided through the +<<_master_realm_access_control, Global and Realm specific roles>>, this feature provides more fine-grained control over +how realm resources can be accessed and managed based on a well-defined set of operations that can be performed on them. + +By relying on Policy-Based Access Control, server administrators can define permissions for realm resources such as users, +groups, and clients, using different policy types, or access control methods, so that a realm administrator is limited to +accessing a subset of realm resources and their operations. + +The feature provides an alternative to the aforementioned RBAC mechanism, but it does +not replace it. You are still able to grant administrative roles like `view-users` or `manage-clients` to delegate access +to realm administrators, but doing so will skip the mechanisms provided by this feature. + +Enforcing access to realm resources only applies when managing resources through the administration console or the Admin API. + +==== Understanding the Realm Resource Types + +In a realm, you can manage different types of resources such as users, groups, clients, client scopes, roles, and so on. +As a realm administrator, you are constantly managing these resources when managing identities and how they authenticate +and are authorized to access a realm and applications.
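These resource types are usually managed through the administration console, but the same operations are available from the Admin REST API, where the access controls described in this chapter are enforced in the same way. The following minimal sketch is an illustration only; the server URL, realm name, user names, and credentials are placeholder assumptions, not values from this guide.

[source,python]
----
# Hedged sketch: list realm users over the Admin REST API as a realm administrator.
# Assumptions: Keycloak at http://localhost:8080, a realm named "myrealm", and a
# realm administrator "myadmin" whose access is governed by fine-grained permissions.
import requests

BASE = "http://localhost:8080"

# Obtain an access token with the built-in admin-cli client (direct access grant).
token = requests.post(
    f"{BASE}/realms/myrealm/protocol/openid-connect/token",
    data={"grant_type": "password", "client_id": "admin-cli",
          "username": "myadmin", "password": "change-me"},
    timeout=10,
).json()["access_token"]

# List users; when fine-grained admin permissions are enabled, the response only
# contains the users this administrator is allowed to view.
users = requests.get(
    f"{BASE}/admin/realms/myrealm/users",
    headers={"Authorization": f"Bearer {token}"},
    timeout=10,
).json()

for user in users:
    print(user["id"], user.get("username"))
----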
+ +This feature provides the necessary mechanisms to enforce access controls when managing realm resources, limited to: + +* Users +* Groups +* Clients +* Roles + +You can manage permissions for all resources of a given resource type, such as all users in a realm, or +for a specific realm resource, such as a specific user or set of users in the realm. + +==== Understanding the scopes of access + +Each realm resource supports a well-defined set of management operations, or scopes, that can be performed on it, +such as `view`, `manage`, and resource-specific operations such as `view-members`, if you take groups as an example. + +When managing permissions, you are selecting a set of one or more scopes from a resource type to allow realm administrators +to perform specific operations on a resource type. For instance, granting the `view` scope will give realm administrators access +to list, search, and view a realm resource. On the other hand, the `manage` scope will allow administrators to perform updates +and deletes on them. + +The scopes are completely independent of each other. If you give access to `manage` a realm resource, that does not mean the +`view` scope is granted automatically. No transitive dependency exists between scopes. Although this might impact the +overall user experience when managing permissions because you need to select individual scopes, +the benefit is that you can more easily identify the permissions that enforce access to a specific scope. + +Certain scopes from a resource type have a relationship (not a transitive dependency) to scopes in another resource type. +This relationship is mainly true when you manage a resource type that represents a group of realm resources, such as realm groups +and their members. + +===== Users Resource Type + +The *Users* realm resource type represents the users in a realm. You can manage permissions for users based on the following +set of scopes: + +[cols="30%,50%,20%"] +|=== +| *Scope* | *Description* | *Also granted by* + +| *view* | Defines if a realm administrator can view users. This scope should be set whenever you want + to make users available from queries. | `view-members` +| *manage* | Defines if a realm administrator can manage users. | `manage-members` +| *manage-group-membership* | Defines if a realm administrator can assign or unassign users to/from groups. | None +| *map-roles* | Defines if a realm administrator can assign or unassign roles to/from users. | None +| *impersonate* | Defines if a realm administrator can impersonate other users. | `impersonate-members` +|=== + +The user resource type has a strong relationship with some of the permissions you can set for groups. Most of the time, +users are members of groups and granting access to `view-members` or `manage-members` of a group should also allow +a realm administrator to `view` and `manage` members of that group. + +[NOTE] +==== +This feature does not support enforcing access to federated resources; however, this limitation is being considered +for future improvement. +==== + +===== Groups Resource Type + +The *Groups* realm resource type represents the groups in a realm. You can manage permissions for groups based on the following +set of management operations: + +[cols="30%,70%"] +|=== +| *Operation* | *Description* + +| *view* | Defines if a realm administrator can view groups. This scope should be set whenever you want + to make groups available from queries. +| *manage* | Defines if a realm administrator can manage groups.
+| *view-members* | Defines if a realm administrator can view group members. + This operation applies to any child group in the group hierarchy. + This can be prevented by explicitly denying permission for specific subgroups. +| *manage-members* | Defines if a realm administrator can manage group members. + This operation applies to any child group in the group hierarchy. + This can be prevented by explicitly denying permission for specific subgroups. +| *impersonate-members* | Defines if a realm administrator can impersonate group members. + This operation applies to any child group in the group hierarchy. + This can be prevented by explicitly denying permission for specific subgroups. +| *manage-membership* | Defines if a realm administrator can add or remove members from groups. +|=== + +===== Clients Resource Type + +The *Clients* realm resource type represents the clients in a realm. You can manage permissions for clients based on the following +set of management operations: + +[cols="30%,70%"] +|=== +| *Operation* | *Description* + +| *view* | Defines if a realm administrator can view clients. This scope should be set whenever you want + to make clients available from queries. +| *manage* | Defines if a realm administrator can manage clients. +| *map-roles* | Defines if a realm administrator can assign any role defined by a client to a user. +| *map-roles-composite* | Defines if a realm administrator can assign any role defined by a client as a composite to + another role. +| *map-roles-client-scope* | Defines if a realm administrator can assign any role defined by a client to a client scope. +|=== + +The *map-roles* operation does not grant the ability to manage users or assign roles arbitrarily. The administrator must also +have user role mapping permissions on the user. + +===== Roles Resource Type + +The *Roles* realm resource type represents the roles in a realm. You can manage permissions for roles based on the following set of management operations: + +[cols="30%,70%"] +|=== +| *Operation* | *Description* + +| *map-role* | Defines if a realm administrator can assign a role (or multiple roles) to a user. +| *map-role-composite* | Defines if a realm administrator can assign a role (or multiple roles) as a composite to another role. +| *map-role-client-scope* | Defines if a realm administrator can apply a role (or multiple roles) to a client scope. +|=== + +The *map-role* operation does not grant the ability to manage users or assign roles arbitrarily. The administrator must also +have user role mapping permissions on the user. + +If there is a client resource type permission for the *map-roles*, *map-roles-composite*, or *map-roles-client-scope* scopes, +it will take precedence over any role resource type permission if the role is a client role. + +==== Enabling admin permissions for a realm + +To enable fine-grained admin permissions in a realm, follow these steps: + +* Log in to the Admin Console. +* Click *Realm settings*. +* Enable *Admin Permissions* and click *Save*. + +image:images/fine-grain-enable.png[Fine grain enable] + +Once enabled, a *Permissions* section appears in the left-side menu of the administration console. + +image:images/fine-grain-permissions-tab.png[Fine grain permissions tab] + +From this section, you can manage the permissions for realm resources. + +[[_managing-permissions]] +==== Managing Permissions + +The *Permissions* tab provides an overview of all active permissions within a realm.
From here, administrators can create, +update, delete, or search for permissions. You can also pre-evaluate the permissions you have created to check +if they are enforcing access to realm resources as expected. +For more details, see link:#_evaluating-permissions[Evaluating Permissions]. + +To create a permission, click on the `Create permission` button and select the resource type you want to protect. + +image:images/select-resource-type.png[Selecting a resource type to protect] + +Once you select the resource type, you can define how access should be enforced for a set of one or more resources of the selected type: + +image:images/create-permission.png[Creating a permission] + +When managing a permission, you can define the following settings: + +* *Name*: A unique name for the permission. The name should also not conflict with any policy name. +* *Description*: An optional description to better describe what the permission is about. +* *Authorization scopes*: A set of one or more scopes representing the operations you want to protect for the selected resource type. +An administrator must have explicit permission assigned for each operation to perform the corresponding action. For example, +assigning only *manage* without *view* will prevent the user from being visible to the realm administrator. +* *Enforce access to*: Defines if the permission should enforce access to all resources of the selected type or to specific resources in a realm. +* *Policies*: Defines a set of one or more policies that should be evaluated to grant or deny access to the selected resource(s). + +After you create the permission, it automatically takes effect, enforcing access to all the resources and scopes you selected. +Keep that fact in mind when creating and updating permissions in production. + +===== Defining permissions for viewing realm resources + +This feature relies on a partial evaluation mechanism to partially evaluate the permissions that a realm administrator has +when listing and viewing realm resources. This mechanism will pre-fetch all the permissions set for view-related scopes where the realm administrator +is referenced either directly or indirectly. + +Permissions that grant access to `view` a realm resource of a certain type must use one of the following policies to +make them available from queries: + +* `User` +* `Group` +* `Role` + +By using any of the policies above, {project_name} can pre-calculate the set of resources that a realm administrator can view +by looking for a direct (if using a user policy) or indirect (if using a role or group policy) reference to the realm administrator. +Therefore, the partial evaluation mechanism involves decorating queries with access controls that will run at the database level. This capability is mainly important to +properly paginate resources as well as to avoid additional overhead on the server side when evaluating permissions for each +realm resource returned by queries. + +Partial evaluation and filtering occur only if the feature is enabled for a realm, and if the user is not granted +view-related administrative roles like `view-users` or `view-clients`. For instance, it will not happen for regular server administrators granted +the `admin` role.
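As a rough illustration of the query decoration described above (and detailed in the list that follows), the sketch below mimics the idea in plain Python. It is a conceptual model only, under assumed data shapes, and is not Keycloak's implementation.

[source,python]
----
# Conceptual sketch, not Keycloak code: pre-fetch the view-related permissions that
# reference an administrator, pre-evaluate them, and use the result to decorate the
# database query so filtering and pagination happen at the database level.

def partially_evaluate(admin, permissions):
    """Return the set of resource ids the administrator may view."""
    granted, denied = set(), set()
    for permission in permissions:
        if admin not in permission["referenced_admins"]:
            continue  # only permissions referencing the administrator are fetched
        bucket = granted if permission["decision"] == "GRANT" else denied
        bucket.update(permission["resource_ids"])
    return granted - denied

def decorate_query(base_query, viewable_ids):
    # The real mechanism adds access controls (for example IN clauses) to the query.
    if not viewable_ids:
        return f"{base_query} WHERE 1 = 0"  # nothing is viewable
    params = ", ".join(f"'{rid}'" for rid in sorted(viewable_ids))
    return f"{base_query} WHERE id IN ({params})"

permissions = [
    {"referenced_admins": {"myadmin"}, "decision": "GRANT", "resource_ids": {"u1", "u2"}},
    {"referenced_admins": {"myadmin"}, "decision": "DENY", "resource_ids": {"u2"}},
]
print(decorate_query("SELECT * FROM user_entity", partially_evaluate("myadmin", permissions)))
----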
+ +When querying resources, the partial evaluation mechanism works as follows: + +* Resolve all the permissions for a certain resource type that reference the realm administrator +* Pre-evaluate each permission to check if the realm administrator does or does not have access to the resources associated with the permission +* Decorate database queries based on the resources granted or denied + +As a result, the result set of a query will hold only the realm resources to which the realm administrator has access through any of the view-related scopes. + +===== Searching Permissions + +The Admin Console provides several ways to search for permissions, supporting the following capabilities: + +* Search for permissions that contain a specific string in their *Name* +* Search for permissions of a specific resource type, such as *Users* +* Search for permissions of a specific resource type that apply to a particular resource (such as *Users* resource type for user `myadmin`). +* Search for permissions of a specific resource type with a given scope (such as *Users* resource type permissions with the *manage* scope). +* Search for permissions of a specific resource type that apply to a particular resource and have a specific scope (such as *Users* resource +type permissions with the *manage* scope for user `myadmin`). + +.Fine grained permissions search +image:images/fine-grain-search.png[Fine grained permissions search] + +These capabilities allow server administrators to perform queries on their universe of permissions and identify which ones +are enforcing access to a set of one or more realm resources and their scopes. Combined with the evaluation tool on the +*Evaluation* tab, they provide a key tool for managing permissions in a realm. See <<_evaluating-permissions, Evaluating Permissions>> +for more details. + +==== Managing Policies + +The *Policies* tab allows administrators to define conditions using different access control methods to determine whether +a permission should be granted to an administrator attempting to perform operations on a realm resource. When managing permissions, +you must associate at least one policy to grant or deny access to a realm resource. + +Policies are conditions that evaluate to either a `GRANT` or a `DENY`. Their outcome will decide whether +a permission should be granted or denied. + +A permission is only granted if all its associated policies evaluate to a `GRANT`. Otherwise, the permission is denied +and a realm administrator will not be able to access the protected resource. + +{project_name} provides a set of built-in policies that you can choose from: + +image:images/select-policy-type.png[Selecting a policy type] + +Once you have a well-defined and stable permission model for your realm, there is less need to create new policies. You can instead reuse existing policies to create more permissions. + +For more details about each policy type, see link:{authorizationguide_link}#_policy_overview[Managing policies]. + +[[_evaluating-permissions]] +==== Evaluating Permissions + +The *Evaluation* tab provides a testing environment where administrators can verify that permissions are enforcing access +as expected. The administrator can see what permissions are involved when enforcing access to a particular resource and what the outcome is.
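When reading evaluation results, it helps to keep in mind the rule from the previous section: a permission is granted only when every attached policy evaluates to `GRANT`, and a policy configured with negative logic inverts its own outcome first. The sketch below is a conceptual illustration of that rule under assumed policy shapes and made-up group names; it is not Keycloak code.

[source,python]
----
# Conceptual sketch, not Keycloak code: combining policy outcomes for one permission.

def evaluate_policy(policy, context):
    outcome = policy["check"](context)  # True means GRANT, False means DENY
    return not outcome if policy.get("negative_logic") else outcome

def evaluate_permission(permission, context):
    # A permission is granted only if all of its policies grant access.
    return all(evaluate_policy(p, context) for p in permission["policies"])

permission = {
    "name": "Allow managing all realm users",
    "policies": [
        # Group policy: the administrator must be a member of test-admins.
        {"check": lambda ctx: "test-admins" in ctx["groups"]},
        # Negative-logic group policy: deny members of the (hypothetical) blocked-admins group.
        {"check": lambda ctx: "blocked-admins" in ctx["groups"], "negative_logic": True},
    ],
}

print(evaluate_permission(permission, {"groups": {"test-admins"}}))                    # True -> GRANT
print(evaluate_permission(permission, {"groups": {"test-admins", "blocked-admins"}}))  # False -> DENY
----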
+ +You need to provide a set of fields in order to run an evaluation: + +* `User`, the realm administrator or the subject trying to access a resource +* `Resource Type`, the resource type you want to evaluate +* `Resource Selector`, depending on the selected `Resource Type`, you will be prompted to select a specific realm resource like a user, group, or client. +* `Authorization scope`, the scope or the operation you want to evaluate. If not provided, the evaluation will happen for all the scopes of the selected resource type. + +.Fine grained permissions evaluation tab +image:images/fine-grain-evaluation.png[Fine grained permissions evaluation tab] + +By clicking the `Evaluate` button, the server will evaluate all the permissions associated with the selected resource and scopes +just as if the selected `User` were trying to access the resource when using the administration console or the Admin API. + +For instance, in the example above you can see that the user `myadmin` can *manage* user `user-1` because an `Allow managing all realm users` permission +voted `PERMIT`, therefore granting access to the `manage` scope. However, all the other scopes were denied. + +Combined with the searching capabilities from the *Permissions* tab, you can perform troubleshooting to identify any permission that +is not behaving as expected. + +When evaluating permissions, the following rules apply: + +* The outcome from resource-specific permissions has precedence over broader permissions that give access to all resources of a certain type +* If no permissions exist for a specific resource, access will be granted based on the permission that grants access to all resources of a certain type +* The outcome from different permissions that enforce access to a specific resource will only grant access if they all permit access to the resource + +[[_resolving-conflicting-permissions]] +===== Resolving conflicting permissions + +Permissions can have multiple policies associated with them. As the authorization model evolves, it is common for some policies within a permission or +even different permissions related to a specific resource to conflict. + +The evaluation outcome will be "denied" whenever any permission is evaluated to "DENY." If there are multiple permissions related to the same resource, +all of them must grant access in order for the outcome to be "granted." + +IMPORTANT: Fine-grained admin permissions allow you to set up permissions for individual resources or for the resource type itself (such as all users, +all groups, and so on). If a permission or permissions related to a specific resource exist, the "all-resource" permission is *NOT* taken into account +during evaluation. If no specific permission exists, the fallback is to the "all-resource" permission. This approach helps address scenarios like +allowing members of the `realm-admins` group to manage members of realm groups, but preventing them from managing members of the `realm-admins` group +themselves. + +[[_realm_access_control]] +==== Accessing a Realm administration console as a Realm Administrator + +Realm administrators can access a dedicated realm-specific administration console that allows them to manage resources within their assigned realm. +This console is separate from the main {project_name} Admin Console, which is typically used by server administrators. + +For more details on dedicated realm administration consoles and available roles, refer to <<_per_realm_admin_permissions, Dedicated admin consoles>>.
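As the next paragraphs explain, a realm administrator needs at least one of the `query-*` roles of the realm's `realm-management` client before the dedicated console becomes usable. The sketch below shows one way such a role could be assigned over the Admin REST API; the server URL, realm, user names, and credentials are placeholder assumptions, not values from this guide.

[source,python]
----
# Hedged sketch: grant the realm-management "query-users" client role to a realm user.
# Assumptions: Keycloak at http://localhost:8080, realm "myrealm", target user "myadmin",
# and a server administrator account in the master realm used to perform the mapping.
import requests

BASE, REALM = "http://localhost:8080", "myrealm"

token = requests.post(
    f"{BASE}/realms/master/protocol/openid-connect/token",
    data={"grant_type": "password", "client_id": "admin-cli",
          "username": "admin", "password": "admin"},
    timeout=10,
).json()["access_token"]
auth = {"Authorization": f"Bearer {token}"}

# Resolve the realm-management client, the query-users role, and the target user.
client = requests.get(f"{BASE}/admin/realms/{REALM}/clients",
                      params={"clientId": "realm-management"}, headers=auth, timeout=10).json()[0]
role = requests.get(f"{BASE}/admin/realms/{REALM}/clients/{client['id']}/roles/query-users",
                    headers=auth, timeout=10).json()
user = requests.get(f"{BASE}/admin/realms/{REALM}/users",
                    params={"username": "myadmin", "exact": "true"}, headers=auth, timeout=10).json()[0]

# Map the client role to the user.
requests.post(
    f"{BASE}/admin/realms/{REALM}/users/{user['id']}/role-mappings/clients/{client['id']}",
    headers=auth, json=[role], timeout=10,
).raise_for_status()
----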
+ +To access the administration console, a realm administrator must have at least one of the following roles assigned, depending on the resources they +need to administer: + +- *query-users* – Required to query realm users. +- *query-groups* – Required to query realm groups. +- *query-clients* – Required to query realm clients. + +A realm user who is granted any of these roles will be able to access the administration console, but only for the +areas that correspond to the roles granted. For instance, if you assign the `query-users` role, the realm administrator +will only have access to the `Users` section in the administration console. If an administrator is responsible for +multiple resource types (such as both users and groups), they must have all the corresponding "query-*" roles assigned. + +These roles enable basic access to query resources but do not grant permission to view or modify them. To grant or deny access +to realm resources, you need to set up the permissions for any of the operations available from each resource type. +For more details, see link:#_managing-permissions[Managing Permissions]. + +===== Roles and permissions relationship + +Fine-grained permissions are used to grant additional permissions. You cannot override the default behavior of the built-in admin roles. +If a realm administrator is assigned one or more admin roles, it prevents the permissions from being evaluated. This means that +if a respective admin role is assigned to a realm administrator, permission evaluation will be bypassed, and access will be granted. + +[cols="30%,70%"] +|=== +| *Admin Role* | *Description* + +| *query-users* | A realm administrator can see the *Users* section in the administration console and can search for users in the realm. + It does not grant the ability to *view* users. +| *query-groups* | A realm administrator can see the *Groups* section in the administration console and can search for groups in the realm. + It does not grant the ability to *view* groups. +| *query-clients* | A realm administrator can see the *Clients* section in the administration console and can search for clients in the realm. + It does not grant the ability to *view* clients. +| *view-users* | A realm administrator can *view* all users and groups in the realm. +| *manage-users* | A realm administrator can *view*, *map-roles*, *manage-group-membership* and *manage* all users in the realm, + as well as *view*, *manage-membership* and *manage* groups in the realm. +| *impersonation* | A realm administrator can *impersonate* all users in the realm. +| *view-clients* | A realm administrator can *view* all clients in the realm. +| *manage-clients* | A realm administrator can *view* and *manage* all clients and client scopes in the realm. +|=== + +==== Understanding some common use cases + +Consider a situation where an administrator wants to allow a group of administrators to manage all users in the realm except those that +belong to the administrators group. This example includes a `test` realm and a `test-admins` group. + +===== Allowing a group of administrators to manage users + +Create a user permission that allows members of the `test-admins` group to view and manage all users in the realm: + +* Navigate to the *Permissions* tab in the administration console. +* Click *Create permission* and choose the *Users* resource type. +* Fill in the name, such as `Allow managing users`. +* Choose the *view* and *manage* authorization scopes and keep *All Users* checked.
+* Create the condition that needs to be met to grant access by clicking *Create new policy*. +* Fill in the name `Allow test-admins`, select *Group* as *Policy type*. +* Click the *Add groups* button, select the `test-admins` group, and click *Save*. +* Click *Save* on the *Create permission* page. + +===== Allowing a group of admins to manage users except the group's own members + +Now exclude the members of the group itself, so that members of `test-admins` cannot manage other admins. + +* Create a new permission by clicking *Create permission*. +* This time, choose the *Groups* resource type. +* Fill in the name, such as `Disallow managing test-admins`. +* Choose the *manage-members* authorization scope. +* Select *Specific Groups* and choose the `test-admins` group. +* Click *Create new policy* and select the *Group* policy type. +* Fill in the name `Disallow test-admins` and select the `test-admins` group. +* Switch to *Negative Logic* for the policy and click *Save*. +* Click *Save* on the permission. + +===== Allowing members of a group with a specific role to impersonate users + +- Create a "User Permission" for the specific users (or all users) you want to allow to be impersonated. +- Create a "Group Policy" allowing access to members of `test-admins`. +- Create a "Role Policy" allowing access to users assigned the `impersonation-admin` role. +- Assign both policies to the permission. + +===== Preventing specific users from being impersonated + +- Create a *User Permission* for the specific users you want to prevent from being impersonated. +- Create any policy that evaluates to deny (such as a user policy with no users selected). +- Assign the policy to the permission to effectively block impersonation for the selected users. + +===== Allowing admins with a specific role to view users but not manage them + +- Create a "User Permission" with the *view* scope for all users. +- Create a "Role Policy" allowing access to users with a specific role assigned. +- Do _not_ assign the `manage` scope to prevent modification of user details. + +===== Allowing members of a group to manage users and role assignments + +- Create a "User Permission" with the *manage* and *map-roles* scopes for all users. +- Create a "Group Policy" allowing access to members of `test-admins`. + +===== Allowing viewing and managing members of a group but not members of its subgroups + +- Create a "Group Permission" with the *view-members* and *manage-members* scopes for the specific group `mygroup`. +- Assign a "Group Policy" targeting `test-admins` to it. +- Create another "Group Permission" with the *view-members* and *manage-members* scopes for specific groups, selecting all subgroups of `mygroup`. +- Create a negative "Group Policy" for `test-admins` and assign it to the "subgroups" permission. + +===== Allowing impersonation of members of a specific group + +- Create a "Group Permission" with the *impersonate-members* scope for the specific group `mygroup`. +- Assign a "Group Policy" targeting `mygroup-helpdesk` to it. + +==== Performance considerations + +When the feature is enabled for a realm, there is additional overhead when realm administrators are managing any of the +supported resource types. This is mainly true when performing these operations: + +* Listing and searching +* Updating or deleting + +The feature introduces additional checks whenever you are listing or managing realm resources in order to enforce access +based on the permissions you have defined.
This is mainly true when querying realm resources due to the additional overhead +of partially evaluating the permissions for a realm administrator in order to filter and paginate the results. + +It is better to have fewer permissions referencing a realm administrator, with each permission covering most of the resources they can access. For instance, +if you want to delegate access to a realm administrator to manage users, it is better to have those users as members of a group. By doing that, +you are improving not only the performance when evaluating permissions but also creating a permission model that is easier to manage. + +The main impact of access enforcement is when querying realm resources. If a realm administrator is, for instance, referenced +in thousands of permissions through a user, role, or group policy, the partial evaluation mechanism that happens when querying +realm resources will query all those permissions from the database. A more concise and optimized model will help to fetch fewer +permissions, but still enough to grant or deny access to realm resources. + +For instance, granting a realm administrator access to view and manage users in a realm is better done with a group permission +than by creating individual permissions for each individual user in a realm. Also make sure that the policies associated with a +permission that reference a realm administrator, either directly (user policy) +or indirectly (role or group policy), do not span multiple (thousands of) permissions, regardless of the resource type. + +As an example, suppose you have three users in a realm, and you want to allow `bob`, a realm administrator, to `view` and `manage` them. +A non-optimal permission model would create three different permissions, one for each user, where a user policy grants access to `bob`. Instead, +you can have a single group permission, or even a single user permission, that groups those three users while still granting access to `bob` +using the same user policy. + +The same is true if you want to give more realm administrators access to those three users. Instead of creating individual policies, +consider using a group or role policy. The permission model is use-case-specific, but these recommendations are important +to provide not only better manageability but also to improve the overall performance of the server when managing realm resources. + +In terms of server configuration, depending on the size of your realm and the number of permissions and policies you have, you might consider +changing the cache configuration to increase the size of the following caches: + +* `realms` +* `users` +* `authorization` + +Consider looking at the server metrics for these caches to find the best value when sizing your deployment. + +When filtering resources, the partial evaluation mechanism will eventually rely on `IN` clauses in SQL statements +to filter the results. Depending on your database, you might have limitations on the number of parameters for the `IN` clause. +That is the case for old versions of the Oracle database, which have a hard limit of 1000 parameters. To avoid such problems, +keep in mind the considerations above about the number of permissions that grant or deny access to a single realm administrator.
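As a rough illustration of that `IN` clause limitation, the sketch below shows how a large set of granted resource ids translates into bind parameters and why such a set may need to be split to stay under a database limit. It is a conceptual example only, not Keycloak's implementation; the limit of 1000 simply mirrors the Oracle case mentioned above.

[source,python]
----
# Conceptual sketch, not Keycloak code: every granted resource id resolved by partial
# evaluation becomes an IN-clause bind parameter, and some databases cap those parameters.

def in_clause_chunks(resource_ids, max_params=1000):
    """Split resource ids into chunks that respect the database parameter limit."""
    ids = list(resource_ids)
    return [ids[i:i + max_params] for i in range(0, len(ids), max_params)]

granted_ids = [f"user-{n}" for n in range(2500)]  # ids a verbose permission model might grant
chunks = in_clause_chunks(granted_ids)
print(f"{len(chunks)} IN clauses needed, sized {[len(c) for c in chunks]}")
----

A leaner permission model, such as the single group permission described above, keeps this set small in the first place.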
diff --git a/docs/documentation/server_admin/topics/admin-console-permissions/fine-grain.adoc b/docs/documentation/server_admin/topics/admin-console-permissions/fine-grain.adoc index d7910c362278..0d79a1e2cb91 100644 --- a/docs/documentation/server_admin/topics/admin-console-permissions/fine-grain.adoc +++ b/docs/documentation/server_admin/topics/admin-console-permissions/fine-grain.adoc @@ -1,10 +1,9 @@ -[[_fine_grain_permissions]] -=== Fine grain admin permissions +=== Fine grained admin permissions V1 -:tech_feature_name: Fine Grain Admin Permissions -:tech_feature_id: admin-fine-grained-authz -include::../templates/techpreview.adoc[] +IMPORTANT: Fine-grained admin permissions V1 have been replaced by a <<_fine_grained_permissions, new version>>. +Version 1 of the feature is still marked as preview and is available, but it may be deprecated and removed +in the future. To enable it, start the server with `--features=admin-fine-grained-authz:v1`. Sometimes roles like `manage-realm` or `manage-users` are too coarse grain and you want to create restricted admin accounts that have more fine grain permissions. {project_name} allows you to define @@ -298,4 +297,3 @@ manage-membership:: Policies that decide if an admin can change the membership of the group. Add or remove members from the group. - diff --git a/docs/documentation/server_admin/topics/admin-console-permissions/master-realm.adoc b/docs/documentation/server_admin/topics/admin-console-permissions/master-realm.adoc index 3c0b626a8850..4f2374df4f79 100644 --- a/docs/documentation/server_admin/topics/admin-console-permissions/master-realm.adoc +++ b/docs/documentation/server_admin/topics/admin-console-permissions/master-realm.adoc @@ -1,9 +1,10 @@ +[[_master_realm_access_control]] === Master realm access control The `master` realm in {project_name} is a special realm and treated differently than other realms. Users in the {project_name} `master` realm can be granted permission to manage zero or more realms that are deployed on the {project_name} server. -When a realm is created, {project_name} automatically creates various roles that grant fine-grain permissions to access that new realm. +When a realm is created, {project_name} automatically creates various roles that grant permissions to access that new realm. Access to The Admin Console and Admin REST endpoints can be controlled by mapping these roles to users in the `master` realm. It's possible to create multiple superusers, as well as users that can only manage specific realms. @@ -27,18 +28,24 @@ level of access to manage an individual realm. The roles available are: -* view-realm -* view-users -* view-clients -* view-events -* manage-realm -* manage-users * create-client +* impersonation +* manage-authorization * manage-clients -* manage-events -* view-identity-providers +* manage-events * manage-identity-providers -* impersonation +* manage-realm +* manage-users +* query-clients +* query-groups +* query-realms +* query-users +* view-authorization +* view-clients +* view-events +* view-identity-providers +* view-realm +* view-users Assign the roles you want to your users and they will only be able to use that specific part of the administration console.
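For completeness, the sketch below lists the roles shown above for one managed realm from within the `master` realm over the Admin REST API. It assumes (this is not stated in this section) that those roles live on a dedicated client conventionally named `<realm>-realm`, for example `myrealm-realm`; the server URL and credentials are placeholder assumptions.

[source,python]
----
# Hedged sketch: list the per-realm admin roles available in the master realm.
# Assumption: the roles for realm "myrealm" are defined on the "myrealm-realm" client.
import requests

BASE = "http://localhost:8080"

token = requests.post(
    f"{BASE}/realms/master/protocol/openid-connect/token",
    data={"grant_type": "password", "client_id": "admin-cli",
          "username": "admin", "password": "admin"},
    timeout=10,
).json()["access_token"]
auth = {"Authorization": f"Bearer {token}"}

client = requests.get(f"{BASE}/admin/realms/master/clients",
                      params={"clientId": "myrealm-realm"}, headers=auth, timeout=10).json()[0]
roles = requests.get(f"{BASE}/admin/realms/master/clients/{client['id']}/roles",
                     headers=auth, timeout=10).json()

for name in sorted(r["name"] for r in roles):
    print(name)  # create-client, impersonation, manage-authorization, ...
----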
diff --git a/docs/documentation/server_admin/topics/admin-console-permissions/per-realm.adoc b/docs/documentation/server_admin/topics/admin-console-permissions/per-realm.adoc index 80a4f5d39998..78131ff55106 100644 --- a/docs/documentation/server_admin/topics/admin-console-permissions/per-realm.adoc +++ b/docs/documentation/server_admin/topics/admin-console-permissions/per-realm.adoc @@ -8,18 +8,25 @@ Users within that realm can be granted realm management permissions by assigning Each realm has a built-in client called `realm-management`. You can view this client by going to the `Clients` left menu item of your realm. This client defines client-level roles that specify permissions that can be granted to manage the realm. -* view-realm -* view-users -* view-clients -* view-events -* manage-realm -* manage-users * create-client +* impersonation +* manage-authorization * manage-clients * manage-events -* view-identity-providers * manage-identity-providers -* impersonation +* manage-realm +* manage-users +* query-clients +* query-groups +* query-realms +* query-users +* realm-admin +* view-authorization +* view-clients +* view-events +* view-identity-providers +* view-realm +* view-users Assign the roles you want to your users and they will only be able to use that specific part of the administration console. diff --git a/docs/documentation/server_admin/topics/authentication/conditions.adoc b/docs/documentation/server_admin/topics/authentication/conditions.adoc index 14e8cb4de1aa..9090c8ae96fc 100644 --- a/docs/documentation/server_admin/topics/authentication/conditions.adoc +++ b/docs/documentation/server_admin/topics/authentication/conditions.adoc @@ -107,19 +107,22 @@ The last thing is defining the property with an error message in the login theme deny-role1 = You do not have required role! ---- +[#twofa-conditional-workflow-examples] ==== 2FA conditional workflow examples The section presents some examples of conditional workflows that integrates 2nd Factor Authentication (2FA) in different ways. The examples copy the default `browser` flow and modify the configuration inside the `forms` sub-flow. ===== Conditional 2FA sub-flow -The default `browser` flow uses a `Conditional OTP` sub-flow that already gives a 2FA with OTP Form (One Time Password). Following the same idea, different 2FA methods can be integrated with the `Condition - User Configured`. +The default `browser` flow uses a `Conditional 2FA` sub-flow that already provides second-factor authentication (2FA) with the OTP Form (One Time Password). It also provides WebAuthn and Recovery Codes, but they are disabled by default. Consistent with this approach, different 2FA methods can be integrated with the `Condition - User Configured`. .2FA all alternative image:images/2fa-example1.png[2FA all alternative] The `forms` sub-flow contains another `2FA` conditional sub-flow with `Condition - user configured`. Three 2FA steps (OTP, Webauthn and Recovery Codes) are allowed as alternative steps. The user will be able to choose one of the three options, if they are configured for the user. As the sub-flow is conditional, the authentication process will complete successfully if no 2FA credential is configured. +This configuration provides the same behavior as the default *browser* flow when both _Disabled_ steps are configured as _Alternative_. + +===== Conditional 2FA sub-flow and deny access The second example continues the previous one.
After the `2FA` sub-flow, another flow `Deny access if no 2FA` is used to check if the previous `2FA` was not executed. In that case (the user has no 2FA credential configured) the access is denied. diff --git a/docs/documentation/server_admin/topics/authentication/flows.adoc b/docs/documentation/server_admin/topics/authentication/flows.adoc index 08479f08254c..7dcfe808020b 100644 --- a/docs/documentation/server_admin/topics/authentication/flows.adoc +++ b/docs/documentation/server_admin/topics/authentication/flows.adoc @@ -36,32 +36,30 @@ Since this sub-flow is marked as _alternative_, it will not be executed if the * The first execution is the *Username Password Form*, an authentication type that renders the username and password page. It is marked as _required_, so the user must enter a valid username and password. -The second execution is the *Browser - Conditional OTP* sub-flow. This sub-flow is _conditional_ and executes depending on the result of the *Condition - User Configured* execution. If the result is true, {project_name} loads the executions for this sub-flow and processes them. +The second execution is the *Browser - Conditional 2FA* sub-flow. This sub-flow is _conditional_ and executes depending on the result of the *Condition - User Configured* execution. If the result is true, {project_name} loads the executions for this sub-flow and processes them. -The next execution is the *Condition - User Configured* authentication. This authentication checks if {project_name} has configured other executions in the flow for the user. The *Browser - Conditional OTP* sub-flow executes only when the user has a configured OTP credential. +The next execution is the *Condition - User Configured* authentication. This authentication checks if {project_name} has configured other executions in the flow for the user. The *Browser - Conditional 2FA* sub-flow executes only when the user has a configured OTP credential. -The final execution is the *OTP Form*. {project_name} marks this execution as _required_ but it runs only when the user has an OTP credential set up because of the setup in the _conditional_ sub-flow. If not, the user does not see an OTP form. +The final execution is the *OTP Form*. {project_name} marks this execution as _alternative_, but it runs only when the user has an OTP credential set up because of the setup in the _conditional_ sub-flow. If the OTP credential is not set up, the user does not see an OTP form. -===== Requirement -A set of radio buttons that control the execution of an action executes. +The default *browser* flow contains two more executions inside the *Browser - Conditional 2FA*, *WebAuthn Authenticator* and *Recovery Authentication Code Form*. These executions are _Disabled_ by default and they are the other 2FA methods that can be added to the flow. Change the requirement from _Disabled_ to _Alternative_ to make them available if the respective credential has been configured for the user. If the user has configured all alternative credential types, the credential with the highest priority is displayed by default. However, the *Try Another Way* option will appear so that the user has the alternative methods to log in. [[_execution-requirements]] -====== Required +===== Requirement +A drop-down menu that controls the execution of an action. +Required:: All _Required_ elements in the flow must be successfully sequentially executed. The flow terminates if a required element fails. 
-====== Alternative - +Alternative:: Only a single element must successfully execute for the flow to evaluate as successful. Because the _Required_ flow elements are sufficient to mark a flow as successful, any _Alternative_ flow element within a flow containing _Required_ flow elements will not execute. -====== Disabled - +Disabled:: The element does not count to mark a flow as successful. -====== Conditional - +Conditional:: This requirement type is only set on sub-flows. - ++ * A _Conditional_ sub-flow contains executions. These executions must evaluate to logical statements. * If all executions evaluate as _true_, the _Conditional_ sub-flow acts as _Required_. * If any executions evaluate as _false_, the _Conditional_ sub-flow acts as _Disabled_. @@ -235,6 +233,17 @@ Creating an advanced flow such as this can have side effects. For example, if yo * In the *Action* menu, select *Bind flow* and select *Reset credentials flow* from the dropdown and click *Save* ==== +[[_client-policy-auth-flow]] +==== Using Client Policies to Select an Authentication Flow +<<_client_policies, Client Policies>> can be used to dynamically select an Authentication Flow based on specific conditions, such as requesting a particular scope or an ACR (Authentication Context Class Reference) using the `AuthenticationFlowSelectorExecutor` in combination with the condition you prefer. + +The `AuthenticationFlowSelectorExecutor` allows you to select an appropriate authentication flow and set the level of authentication to be applied once the selected flow is completed. + +A possible configuration involves using the `ACRCondition` in combination with the `AuthenticationFlowSelectorExecutor`. This setup enables you to select an authentication flow based on the requested ACR and have the ACR value included in the token using <<_mapping-acr-to-loa-realm,ACR to LoA Mapping>>. + +For more details, see <<_client_policies, Client Policies>>. + + [[_step-up-flow]] ==== Creating a browser login flow with step-up mechanism @@ -388,6 +397,13 @@ not be the desired behavior. NOTE: A conflict situation may arise when an admin specifies several flows, sets different LoA levels to each, and assigns the flows to different clients. However, the rule is always the same: if a user has a certain level, it needs only have that level to connect to a client. It's up to the admin to make sure that the LoA is coherent. +NOTE: Step-up authentication with Level of Authentication conditions is intended for use cases where each level +requires all authentication methods from the preceding levels. +For instance, level X must always include all authentication methods required by level X-1. +For use cases where a specific level, such as level 3, requires a different authentication method from the previous levels, +it may be more appropriate to use mapping of ACR to a specific flow. +For more details, see <<_client-policy-auth-flow, Using Client Policies to Select an Authentication Flow>>. + *Example scenario* . Max Age is configured as 300 seconds for level 1 condition. diff --git a/docs/documentation/server_admin/topics/authentication/passkeys.adoc b/docs/documentation/server_admin/topics/authentication/passkeys.adoc index 0e668ba03f07..b4fd2d125814 100644 --- a/docs/documentation/server_admin/topics/authentication/passkeys.adoc +++ b/docs/documentation/server_admin/topics/authentication/passkeys.adoc @@ -4,8 +4,18 @@ {project_name} provides preview support for https://fidoalliance.org/passkeys/[Passkeys]. 
{project_name} works as a Passkeys Relying Party (RP). -Passkey registration and authentication are realized by the features of xref:webauthn_{context}[WebAuthn]. -Therefore, users of {project_name} can do Passkey registration and authentication by existing xref:webauthn_{context}[WebAuthn registration and authentication]. +Passkey registration and authentication are performed using the same features of xref:webauthn_{context}[WebAuthn]. More specifically, *Passkeys* are related to xref:_webauthn_loginless[LoginLess WebAuthn] because they avoid the use of a password during login. +Therefore, users of {project_name} can do Passkey registration and authentication through the existing xref:webauthn_{context}[WebAuthn registration and authentication], using the *passwordless* variants. The *Passkeys* feature has been integrated seamlessly into the default authentication forms, so, when activated, both the conditional UI and the modal UI are available in the forms in which the username input is displayed. + +*Passkeys* have been added to the following authenticator implementations: + +. *Username Password Form*: The username and password form used by default in {project_name}. +. *Username Form*: The form in which the username is displayed alone and is typically followed by the password form. This authenticator is used when the username and password are to be presented to the user in two separate steps. Authenticating using *Passkeys* in the *Username Form* skips the next *Password Form* execution. The *Password Form* implementation checks if the user was already authenticated using a passwordless WebAuthn credential and, if that is the case, no password is requested. +. *Organization Identity - First Login*: The organization form that is used when the <<_enabling_organization_, organizations>> feature is enabled for the realm. Using *Passkeys* in this step avoids the subsequent execution of the username and password form in the same way as in the previous forms. + +:tech_feature_name: Passkeys +:tech_feature_id: passkeys +include::../templates/techpreview.adoc[] [NOTE] ==== @@ -14,78 +24,37 @@ However, Passkeys operations success depends on the user's environment. Make sur ==== [[_passkeys-conditional-ui]] -==== Passkey Authentication with Conditional UI +==== Passkey Authentication with Conditional UI or autofill -Passkey Authentication with Conditional UI can authenticate a user with its passkey in the same way as in xref:_webauthn_loginless[LoginLess WebAuthn]. -This authentication shows a user a list of passkeys stored on a device where the user runs a browser. -Therefore, the user can select one of the passkeys in the list to authenticate them. Compared with xref:_webauthn_loginless[LoginLess WebAuthn], the authentication improves the user's experience of authentication. +The Conditional User Interface (UI) or autofill is a feature related to passkeys in which the username input (the field in which the login username is typed) is tagged with a `webauthn` autofill detail token (for example using the attribute `autocomplete="username webauthn"`). When the user clicks in such an input field, the user agent (browser) can render a list of discovered credentials for the user to select from, and perhaps also give the user the option to _try another way_. If the user selects one of the presented passkeys, {project_name} initiates the WebAuthn authentication with that key and avoids any password typing.
+ +Compared with xref:_webauthn_loginless[LoginLess WebAuthn], this approach improves the user's authentication experience. + +.Passkey Authentication with Conditional UI Autofill using Chrome browser +image:images/passkey-conditional-ui-autofill.png[Passkey Authentication with Conditional UI Autofill using Chrome browser] [NOTE] ==== This authentication uses the https://github.com/w3c/webauthn/wiki/Explainer:-WebAuthn-Conditional-UI/[WebAuthn Conditional UI]. Therefore, this authentication success depends on the user's environment. -If the environment does not support WebAuthn Conditional UI, this authentication falls back to xref:_webauthn_loginless[LoginLess WebAuthn]. +If the environment does not support WebAuthn Conditional UI, the user can instead use the modal UI directly or the username and password login. ==== -:tech_feature_name: Passkey Authentication -:tech_feature_setting: -Dkeycloak.profile.feature.passkeys=enabled -:tech_feature_id: passkeys -include::../templates/techpreview.adoc[] +==== Passkeys Authentication with Modal UI -.Procedure -===== Setup +Because the conditional UI sometimes cannot show all of the user's credentials, the modal UI can always be initiated using the *Sign in with Passkey* button. The Modal User Interface (UI) ensures all passkeys are usable, including the ones stored in hardware tokens or on other devices that cannot be enumerated without user interaction. -Set up Passkey Authentication with Conditional UI as follows: +.Passkey Authentication with Modal UI using Chrome browser +image:images/passkey-modal-ui.png[Passkey Authentication with Modal UI using Chrome browser] -. (if not already present) Register a new required action for WebAuthn passwordless support. Use the steps described in <<_webauthn-register, Enable WebAuthn Authenticator Registration>>. Register the `Webauthn Register Passwordless` action. +==== Setup -. Configure the `WebAuthn Passwordless Policy`. Perform the configuration in the Admin Console, `Authentication` section, in the tab `Policies` -> `WebAuthn Passwordless Policy`. Set *User Verification Requirement* to *required* and *Require discoverable credential* to *Yes* when you configure the policy for loginless scenario. Note that since there is no dedicated Loginless policy, it is impossible to mix authentication scenarios with user verification=no/discoverable credential=no and loginless scenarios (user verification=yes/discoverable credential=yes). +Set up Passkey Authentication for the default forms as follows: + +. (If not already done) Check that the required action *WebAuthn Register Passwordless* is enabled. Use the steps described in <<_webauthn-register, Enable WebAuthn Authenticator Registration>>, but using *WebAuthn Register Passwordless* instead of *WebAuthn Register*. + +. Configure the *WebAuthn Passwordless Policy* in the same way as explained in xref:_webauthn_loginless[LoginLess WebAuthn]. Perform the configuration in the Admin Console, `Authentication` section, in the tab `Policies` → `WebAuthn Passwordless Policy`. Set *User Verification Requirement* to *required* and *Require discoverable credential* to *Yes* when you configure the policy for the passwordless scenario. + NOTE: Storage capacity is usually very limited on hardware passkeys, meaning that you cannot store many discoverable credentials on your passkey. However, this limitation may be mitigated for instance if you use an Android phone backed by a Google account as a passkey device or an iPhone backed by Bitwarden.
Configure the authentication flow. Create a new authentication flow, add the *Passkeys Conditional UI Authenticator* execution and set the Requirement setting of the execution to *Required*. + -The final configuration of the flow looks similar to this: -image:images/passkey-conditional-ui-flow.png[Passkey Authentication with Conditional UI flow flow] - -. Bind the flow above as a *browser* authentication flow in the realm as described in the <<_webauthn-register, WebAuthn section above>>. - -The authentication flow above requires that user must already have passkey credential on his or her account to be able to log in. This requirement means that all users in the realm must have passkeys already set. -That can be achieved for instance by enabling user registration as described below. - -===== Setup of the registration for passkeys conditional UI - -. Enable <> for your realm - -. In the <> of the realm, select flow *registration* and switch the authenticator *Password validation* to *Disabled*. -This means that newly registered users will not be required to create the passwords in this example setup. Users must always use passkeys instead of passwords. - -. Return to the *Required actions* sub-tab of the tab *Authentication* tab and find the `Webauthn Register Passwordless` action and mark it with *Set as default action*. -This means that it would be added to all new users after their registration. - -The alternative to the registration flow setup is to add the required action `WebAuthn Register Passwordless` to a user who is already known to {project_name}. The user with the required action configured will have to authenticate (with a username/password for example) and will then be prompted to register a passkey to be used for loginless authentication. - -[NOTE] -==== -We plan to improve the usability and allow integration of conditional passkeys with the existing authenticators and forms such as the default username / password form. -==== - -[NOTE] -==== -From https://www.w3.org/TR/webauthn-3/[Web Authn Level 3], *Resident Key* was replaced with *Discoverable Credential*. -==== - -If a user's browser supports https://github.com/w3c/webauthn/wiki/Explainer:-WebAuthn-Conditional-UI/[WebAuthn Conditional UI], the following screen is shown. - -.Passkey Authentication with Conditional UI -image:images/passkey-conditional-ui-authentication.png[Passkey Authentication with Conditional UI] - -When the user clicks the *Select your passkey* textbox, a list of passkeys stored on a device where the user runs a browse is shown as follows. - -.Passkey Authentication with Conditional UI Autofill -image:images/passkey-conditional-ui-autofill.png[Passkey Authentication with Conditional UI Autofill] - -If a user's browser does not support https://github.com/w3c/webauthn/wiki/Explainer:-WebAuthn-Conditional-UI/[WebAuthn Conditional UI], the authentication falls back to the xref:_webauthn_loginless[LoginLess WebAuthn] as follows. - -.Passkey Authentication with Conditional UI falling back to LoginLess WebAuthn -image:images/passkey-conditional-ui-fallback-authentication.png[Passkey Authentication with Conditional UI falling back to LoginLess WebAuthn] +. In the same policy tab *WebAuthn Passwordless Policy* activate the *Enable Passkeys* option at the bottom. This switch is the one that really enables passkeys (both conditional and modal UI) in the default username forms. 
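Because *Passkeys* support is still a technology preview (see the preview note above), the feature has to be enabled for any of the setup above to take effect. A minimal sketch, assuming the feature id matches the `:tech_feature_id: passkeys` attribute used in this section and a development-mode start; adjust the command for your distribution and startup mode:

[source,bash]
----
# Start the server with the passkeys preview feature enabled
bin/kc.sh start-dev --features=passkeys
----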
\ No newline at end of file diff --git a/docs/documentation/server_admin/topics/authentication/password-policies.adoc b/docs/documentation/server_admin/topics/authentication/password-policies.adoc index 58088c10d98a..5e3589d0ae32 100644 --- a/docs/documentation/server_admin/topics/authentication/password-policies.adoc +++ b/docs/documentation/server_admin/topics/authentication/password-policies.adoc @@ -12,7 +12,7 @@ When {project_name} creates a realm, it does not associate password policies wit . Enter a value that applies to the policy chosen. . Click *Save*. + -Password policy +.Password policy image:images/password-policy.png[Password Policy] After saving the policy, {project_name} enforces the policy for new users. @@ -51,7 +51,7 @@ Supported password hashing algorithms are shown in the following table. It is highly recommended to use Argon2 when possible as it has significantly less CPU requirements compared to PBKDF2, while at the same time being more secure. -The default password hashing algorithm for the server can be configured with `--spi-password-hashing-provider-default=`. +The default password hashing algorithm for the server can be configured with `--spi-password-hashing--provider-default=`. To prevent excessive memory and CPU usage, the parallel computation of hashes by Argon2 is by default limited to the number of cores available to the JVM. To configure the Argon2 hashing provider, use its provider options. @@ -118,7 +118,7 @@ The password cannot be the same as the email address of the user. ===== Regular expression Password must match one or more defined Java regular expression patterns. -See https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/regex/Pattern.html[Java's regular expression documentation] for the syntax of those expressions. +See https://docs.oracle.com/en/java/javase/21/docs/api/java.base/java/util/regex/Pattern.html[Java's regular expression documentation] for the syntax of those expressions. ===== Expire password @@ -141,14 +141,14 @@ Password must not be in a blacklist file. * The value of the blacklist file must be the name of the blacklist file, for example, `100k_passwords.txt`. * Blacklist files resolve against `+${kc.home.dir}/data/password-blacklists/+` by default. Customize this path using: ** The `keycloak.password.blacklists.path` system property. -** The `blacklistsPath` property of the `passwordBlacklist` policy SPI configuration. To configure the blacklist folder using the CLI, use `--spi-password-policy-password-blacklist-blacklists-path=/path/to/blacklistsFolder`. +** The `blacklistsPath` property of the `passwordBlacklist` policy SPI configuration. To configure the blacklist folder using the CLI, use `--spi-password-policy--password-blacklist--blacklists-path=/path/to/blacklistsFolder`. .A note about False Positives The current implementation uses a BloomFilter for fast and memory efficient containment checks, such as whether a given password is contained in a blacklist, with the possibility for false positives. * By default a false positive probability of `0.01%` is used. -* To change the false positive probability by CLI configuration, use `--spi-password-policy-password-blacklist-false-positive-probability=0.00001`. +* To change the false positive probability by CLI configuration, use `--spi-password-policy--password-blacklist--false-positive-probability=0.00001`. 
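Putting the CLI options above together, a server start command that points the blacklist provider at a custom folder and lowers the false positive probability could look like the following sketch; the `bin/kc.sh` launcher, the folder path, and the probability value are placeholders, while the option names are taken verbatim from the text above:

[source,bash]
----
bin/kc.sh start \
  --spi-password-policy--password-blacklist--blacklists-path=/path/to/blacklistsFolder \
  --spi-password-policy--password-blacklist--false-positive-probability=0.00001
----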
[[maximum-authentication-age]] ===== Maximum Authentication Age diff --git a/docs/documentation/server_admin/topics/authentication/recovery-codes.adoc b/docs/documentation/server_admin/topics/authentication/recovery-codes.adoc index 870e1f6497fa..60a9cb50ea7c 100644 --- a/docs/documentation/server_admin/topics/authentication/recovery-codes.adoc +++ b/docs/documentation/server_admin/topics/authentication/recovery-codes.adoc @@ -1,9 +1,53 @@ [[_recovery-codes]] -=== Recovery Codes (RecoveryCodes) +=== Recovery Codes -You can configure Recovery codes for two-factor authentication by adding 'Recovery Authentication Code Form' as a two-factor authenticator to your authentication flow. For an example of configuring this authenticator, see xref:webauthn_{context}[WebAuthn]. +The Recovery Codes are a set of sequential one-time passwords (currently 12) auto-generated by {project_name}. The codes can be used as a 2nd Factor Authentication (2FA) method by adding the `Recovery Authentication Code Form` authenticator to your authentication flow. When configured in the flow, {project_name} asks the user for the next generated code in order. When the user enters the current code, it is consumed and the next code will be required for the next login. -:tech_feature_name: RecoveryCodes -:tech_feature_id: recovery-codes -include::../templates/techpreview.adoc[] +By their nature, the Recovery Codes normally work as a backup for other 2FA methods. They can complement the `OTP Form` or the `WebAuthn Authenticator` to provide a fallback way to log in to {project_name}, for example, if the software or hardware device used for the other 2FA methods is broken or unavailable. + +==== Check Recovery Codes required action is enabled + +Check that the Recovery Codes action is enabled in {project_name}: + +. Click *Authentication* in the menu. +. Click the *Required Actions* tab. +. Ensure the *Enabled* switch for *Recovery Authentication Codes* is set to *On*. + +Toggle the *Default Action* switch to *On* if you want all new users to register their Recovery Codes credential at their first login. + +==== Configure the Recovery Codes required action + +From the *Required Actions* tab of the admin console, you have the option to configure the *Recovery Authentication Codes* required action. Currently, a single configuration option, +*Warning Threshold*, is available. When the number of remaining recovery codes on a user's account drops below the value configured here, the Account Console shows a warning to the user that +recommends setting up a new set of recovery codes. The warning displayed to the user may look similar to this: + +.Recovery Codes Account console warning +image:images/recovery-codes-account-console-warn.png[Recovery Codes Account console warning] + +==== Adding Recovery Codes to the browser flow + +The following procedure adds the `Recovery Authentication Code Form` as an alternative login method in the default *Browser* flow. + +. Click *Authentication* in the realm menu. +. Click the *Browser* flow. +. Locate the execution *Recovery Authentication Code Form* inside the *Browser - Conditional 2FA* sub-flow. +. Change the _requirement_ from _Disabled_ to _Alternative_ for that execution. ++ +.Recovery Codes Browser flow +image:images/recovery-codes-browser-flow.png[Recovery Codes Browser flow] ++ +With this configuration, both 2FA authenticators (`OTP Form` and `Recovery Authentication Code Form`) are alternative ways to log in to {project_name}.
If the user has configured both credential types, the credential with the highest priority will be displayed by default, but the *Try Another Way* option will appear so that the user can use the alternative methods to log in. + +You can see more examples of 2FA configurations in <<twofa-conditional-workflow-examples,2FA conditional workflow examples>>. + +==== Creating the Recovery Codes credential + +Once the Recovery Codes required action is enabled and the credential type is managed in the flow, users can request to create their own codes. The action is just another required action that can be used in {project_name} (invoked directly by the user from the Account Console or assigned by an administrator from the Admin Console). + +The required action, when executed, generates the list of codes and presents it to the user. The action offers to print, download, or copy the list of codes to help the user store them in a safe place. To complete the setup, the *I have saved these codes somewhere safe* checkbox must be selected first. + +.Recovery Authentication Codes setup page +image:images/recovery-codes-setup.png[Recovery Authentication Codes setup page] + +The Recovery Codes can be re-created at any time. diff --git a/docs/documentation/server_admin/topics/authentication/webauthn.adoc b/docs/documentation/server_admin/topics/authentication/webauthn.adoc index 29ea9cb37a0f..e7d5ec8c92f3 100644 --- a/docs/documentation/server_admin/topics/authentication/webauthn.adoc +++ b/docs/documentation/server_admin/topics/authentication/webauthn.adoc @@ -19,71 +19,45 @@ WebAuthn's specification uses a `user.id` to map a public key credential to a sp The setup procedure of WebAuthn support for 2FA is the following: [[_webauthn-register]] -===== Enable WebAuthn authenticator registration +===== Check WebAuthn authenticator registration is enabled . Click *Authentication* in the menu. . Click the *Required Actions* tab. -. Toggle the *Webauthn Register* switch to *ON*. +. Check that the *Webauthn Register* action switch is set to *ON*. Toggle the *Default Action* switch to *ON* if you want all new users to be required to register their WebAuthn credentials. [[_webauthn-authenticator-setup]] -==== Adding WebAuthn authentication to a browser flow +==== Enable WebAuthn authentication in the default browser flow . Click *Authentication* in the menu. . Click the *Browser* flow. -. Select *Duplicate* from the "Action list" to make a copy of the built-in *Browser* flow. -. Enter "WebAuthn Browser" as the name of the copy. -. Click *Duplicate*. -. Click the name to go to the details -. Click the trash can icon 🗑️ of the "WebAuthn Browser Browser - Conditional OTP" and click *Delete*. - -If you require WebAuthn for all users: - -. Click *+* menu of the *WebAuthn Browser Forms*. -. Click *Add step*. -. Click *WebAuthn Authenticator*. -. Click *Add*. -. Select *Required* for the *WebAuthn Authenticator* authentication type to set its requirement to required. + -image:images/webauthn-browser-flow-required.png[Webauthn browser flow required] -+ -. Click the *Action* menu at the top of the screen. -. Select *Bind flow* from the drop-down list. -. Select *Browser* from the drop-down list. -. Click *Save*.
+.WebAuthn browser flow conditional with OTP +image:images/webauthn-browser-flow-conditional-with-OTP.png[WebAuthn browser flow conditional with OTP] -[NOTE] -==== -If a user does not have WebAuthn credentials, the user must register WebAuthn credentials. -==== +With this configuration, the users can choose between using WebAuthn and OTP for the second factor. As the sub-flow is _conditional_, they are only asked to present a 2FA credential (OTP or WebAuthn) if they have already registered one of the respective credential types. If a user has configured both credential types, the credential with the highest priority will be displayed by default. However, the *Try Another Way* option will appear so that the user has the alternative methods to log in. -Users can log in with WebAuthn if they have a WebAuthn credential registered only. So instead of adding the *WebAuthn Authenticator* execution, you can: +If you want to substitute OTP for WebAuthn and maintain it as conditional: -.Procedure -. Click *+* menu of the *WebAuthn Browser Forms* row. -. Click *Add sub-flow*. -. Enter "Conditional 2FA" for the _name_ field. -. Select *Conditional* for the *Conditional 2FA* to set its requirement to conditional. -. On the *Conditional 2FA* row, click the plus sign + and select *Add condition*. -. Click *Add condition*. -. Select *Condition - User Configured*. -. Click *Add*. -. Select *Required* for the *Condition - User Configured* to set its requirement to required. -. Drag and drop *WebAuthn Authenticator* into the *Conditional 2FA* flow -. Select *Alternative* for the *WebAuthn Authenticator* to set its requirement to alternative. +. Change _requirement_ in *OTP Form* to _Disabled_. +. Change _requirement_ in *WebAuthn Authenticator* to _Alternative_. + +.Webauthn browser flow conditional image:images/webauthn-browser-flow-conditional.png[Webauthn browser flow conditional] -The user can choose between using WebAuthn and OTP for the second factor: +If you require WebAuthn for all users and enforce them to configure the credential if not configured: -.Procedure -. On the *Conditional 2FA* row, click the plus sign + and select *Add step*. -. Select *OTP Form* from the list. -. Click *Add*. -. Select *Alternative* for the *OTP Form* to set its requirement to alternative. +. Change _requirement_ in *Browser - Conditional 2FA* to _Required_. +. Change _requirement_ in *OTP Form* to _Disabled_. +. Change _requirement_ in *WebAuthn Authenticator* to _Required_. + -image:images/webauthn-browser-flow-conditional-with-OTP.png[WebAuthn browser flow conditional with OTP] +.Webauthn browser flow required +image:images/webauthn-browser-flow-required.png[Webauthn browser flow required] + +You can see more examples of 2FA configurations in <>. ==== Authenticate with WebAuthn authenticator @@ -191,6 +165,13 @@ If `WebAuthn Authenticator` is set up as required as shown in the first example, After successful registration, the user's browser asks the user to enter the text of their WebAuthn authenticator's label. +[[_webauthn_aia]] +==== Registering WebAuthn credentials using AIA + +WebAuthn credentials can also be registered for a user using <>. The actions *Webauthn Register* (`kc_action=webauthn-register`) and *Webauthn Register Passwordless* (`kc_action=webauthn-register-passwordless`) are available for the applications if enabled in the <>. + +Both required actions allow a parameter *skip_if_exists* that allows to skip the AIA execution if the user already has a credential of that type. 
The `kc_action_status` will be *success* if skipped. For example, adding the option to the common WebAuthn register action is just using the following query parameter `kc_action=webauthn-register:skip_if_exists`. + [[_webauthn_passwordless]] ==== Passwordless WebAuthn together with Two-Factor @@ -239,9 +220,9 @@ An administrator typically requires that Passkeys registered by users for the We Set up WebAuthn Loginless support as follows: -. (if not already present) Register a new required action for WebAuthn passwordless support. Use the steps described in <<_webauthn-register, Enable WebAuthn Authenticator Registration>>. Register the `Webauthn Register Passwordless` action. +. (If not already done) Check the required action for *WebAuthn Register Passwordless* is enabled. Use the steps described in <<_webauthn-register, Enable WebAuthn Authenticator Registration>>, but using *WebAuthn Register Passwordless* instead of *WebAuthn Register*. -. Configure the `WebAuthn Passwordless Policy`. Perform the configuration in the Admin Console, `Authentication` section, in the tab `Policies` -> `WebAuthn Passwordless Policy`. You have to set *User Verification Requirement* to *required* and *Require Discoverable Credential* to *Yes* when you configure the policy for loginless scenario. Note that since there isn't a dedicated Loginless policy it won't be possible to mix authentication scenarios with user verification=no/discoverable credential=no and loginless scenarios (user verification=yes/discoverable credential=yes). Storage capacity is usually very limited on Passkeys meaning that you won't be able to store many discoverable credentials on your Passkey. +. Configure the `WebAuthn Passwordless Policy`. Perform the configuration in the Admin Console, `Authentication` section, in the tab `Policies` -> `WebAuthn Passwordless Policy`. You have to set *User Verification Requirement* to *required* and *Require Discoverable Credential* to *Yes* when you configure the policy for loginless scenario. Storage capacity is usually very limited on Passkeys meaning that you won't be able to store many discoverable credentials on your Passkey. . Configure the authentication flow. Create a new authentication flow, add the "WebAuthn Passwordless" execution and set the Requirement setting of the execution to *Required* diff --git a/docs/documentation/server_admin/topics/authentication/x509.adoc b/docs/documentation/server_admin/topics/authentication/x509.adoc index 7ff66fbd68d3..f2a4ae0da14b 100644 --- a/docs/documentation/server_admin/topics/authentication/x509.adoc +++ b/docs/documentation/server_admin/topics/authentication/x509.adoc @@ -134,6 +134,9 @@ Use CDP to check the certificate revocation status. Most PKI authorities include *CRL file path*:: The path to a file containing a CRL list. The value must be a path to a valid file if the *CRL Checking Enabled* option is enabled. +*CRL abort if non updated*:: +A CRL conforming to link:https://datatracker.ietf.org/doc/html/rfc5280#section-5.1.2.5[RFC5280] contains a next update field that indicates the date by which the next CRL will be issued. When that time is passed, the CRL is considered outdated and it should be refreshed. If this option is `true`, the authentication will fail if the CRL is outdated (recommended). If the option is set to `false`, the outdated CRL is still used to validate the user certificates. + *OCSP Checking Enabled*:: Checks the certificate revocation status by using Online Certificate Status Protocol. 
diff --git a/docs/documentation/server_admin/topics/clients/assembly-client-oidc.adoc b/docs/documentation/server_admin/topics/clients/assembly-client-oidc.adoc index 223ef48ac583..de58e9920853 100644 --- a/docs/documentation/server_admin/topics/clients/assembly-client-oidc.adoc +++ b/docs/documentation/server_admin/topics/clients/assembly-client-oidc.adoc @@ -16,5 +16,6 @@ include::oidc/proc-secret-rotation.adoc[leveloffset=+1] include::oidc/proc-using-a-service-account.adoc[leveloffset=+1] +include::oidc/con-token-role-mappings.adoc[leveloffset=+1] include::oidc/con-audience.adoc[leveloffset=+1] diff --git a/docs/documentation/server_admin/topics/clients/client-policies.adoc b/docs/documentation/server_admin/topics/clients/client-policies.adoc index 1ae912e22c8e..f5274b854056 100644 --- a/docs/documentation/server_admin/topics/clients/client-policies.adoc +++ b/docs/documentation/server_admin/topics/clients/client-policies.adoc @@ -94,6 +94,13 @@ Client Attribute:: Any Client:: This condition always evaluates to true. It can be used for example to ensure that all clients in the particular realm are FAPI compliant. +ACR Condition:: + Applied when an ACR value requested in the authentication request matches the value configured in the condition. For example, it can be used to select an authentication flow based on the requested ACR value. For more details, see the <<_client-policy-auth-flow, related documentation>> and the https://openid.net/specs/openid-connect-core-1_0.html#acrSemantics[official OIDC specification]. + +Grant Type:: + Evaluates to true when a specific grant type is used. For example, it can be used in combination with Client Scope to block a token exchange request when a specific client scope is requested. + + === Executor An executor specifies what action is executed on a client to which a policy is adopted. The executor executes one or several specified actions. For example, @@ -143,6 +150,8 @@ One of several purposes for this executor is to realize the security requirement * Enforce a valid redirect URI that the OAuth 2.1 specification requires * Enforce SAML Redirect binding cannot be used or SAML requests and assertions are signed +Another available executor is the `auth-flow-enforce`, which can be used to enforce an authentication flow during an authentication request. For instance, it can be used to select a flow based on certain conditions, such as a specific scope or an ACR value. For more details, see the <<_client-policy-auth-flow, related documentation>>. + [[_client_policy_profile]] === Profile diff --git a/docs/documentation/server_admin/topics/clients/con-client-scopes.adoc b/docs/documentation/server_admin/topics/clients/con-client-scopes.adoc index 28ff3911eb04..f10082544ccf 100644 --- a/docs/documentation/server_admin/topics/clients/con-client-scopes.adoc +++ b/docs/documentation/server_admin/topics/clients/con-client-scopes.adoc @@ -10,7 +10,7 @@ Client scopes also support the OAuth 2 *scope* parameter. Client applications us include::proc-creating-client-scopes.adoc[] - +[[_client_scopes_protocol]] == Protocol When you create a client scope, choose the *Protocol*. Clients linked in the same scope must have the same protocol. @@ -21,7 +21,7 @@ Each realm has a set of pre-defined built-in client scopes in the menu. ** *roles* + This scope is not defined in the OpenID Connect specification and is not added automatically to the *scope* claim in the access token. 
This scope has mappers, which are used to add the roles of the user to the access token and -add audiences for clients that have at least one client role. These mappers are described in more detail in the <<_audience_resolve, Audience section>>. +add audiences for clients that have at least one client role. These mappers are described in more detail in the <<_oidc_token_role_mappings, Token Role mappings section>> and <<_audience_resolve, Audience section>>. + ** *web-origins* + @@ -60,9 +60,20 @@ Display On Consent Screen:: Consent Screen Text:: The text displayed on the consent screen when this client scope is added to a client when consent required defaults to the name of client scope. The value for this text can be customised by specifying a substitution variable with *${var-name}* strings. The customised value is configured within the property files in your theme. See the link:{developerguide_link}[{developerguide_name}] for more information on customisation. +== Include in token scope + +There is the *Include in token scope* switch on the client scope. If on, the name of this client scope will be added to the access token property scope, and to the Token Response and Token Introspection Endpoint +response claim `scope`. If off, this client scope will be omitted from the token and from the Token Introspection Endpoint response. As mentioned above, some built-in client scopes have this switch disabled, which means +that they are not included in the `scope` claim even if they are applied for the particular request. + [[_client_scopes_linking]] == Link client scope with the client -Linking between a client scope and a client is configured in the *Client Scopes* tab of the client. Two ways of linking between client scope and client are available. +Linking between a client scope and a client is configured in the *Client Scopes* tab of the client. Here is how it looks for the client application `myclient`: + +.Client scopes linking to client +image:images/client-scopes-default.png[] + +There are two ways of linking between the client scope and the client. Default Client Scopes:: This setting is applicable to the OpenID Connect and SAML clients. Default client scopes are applied when issuing OpenID Connect tokens or SAML assertions for a client. The client will inherit Protocol Mappers and Role Scope Mappings that are defined on the client scope. For the OpenID Connect Protocol, the Mappers and Role Scope Mappings are always applied, regardless of the value used for the scope parameter in the OpenID Connect authorization request. @@ -80,10 +91,27 @@ scope=openid phone The scope parameter contains the string, with the scope values divided by spaces. The value *openid* is the meta-value used for all OpenID Connect requests. The token will contain mappers and role scope mappings from the default client scopes *profile* and *email* as well as *phone*, an optional client scope requested by the scope parameter. +[[_client_scopes_dedicated]] +=== Dedicated client scope + +There is a special client scope, which is linked to every client. It is a dedicated client scope, which is always shown as the first client scope when you click on the tab *Client scopes* of the particular client. +For example, for client `myclient`, the client scope is shown as `myclient-dedicated`. This client scope represents the protocol mappers and role scope mappings, which are linked directly to the client itself. + +It is not possible to unlink the dedicated client scope from a client. 
Also, it is not possible to link this dedicated client scope to a different client. In other words, the dedicated client scope is useful +just for protocol mappers and role scope mappings, which are specific to a single client. In case you want to share the same protocol mapper configuration among multiple clients, it is usually useful to create +a client scope in the realm tab *Client scopes* and then link this shared client scope to every client that should apply this shared configuration. + +In the tab *Scope* of the dedicated client scope, you can define role scope mappings applicable to this client. You can also see the switch *Full scope allowed* in this tab. +The details about this switch are described in <<_role_scope_mappings, this section>> and in <<_oidc_token_role_mappings,this section>>. + +NOTE: In the admin REST API and in the internal {project_name} storage, the dedicated client scope does not exist as its protocol mappers and role scope mappings are internally linked to the client itself. The. +dedicated client scope is in fact just an abstraction for the admin console UI. + [[_client_scopes_evaluate]] == Evaluating Client Scopes include::proc-evaluating-client-scopes.adoc[] +[[client-scopes-permissions]] == Client scopes permissions When issuing tokens to a user, the client scope applies only if the user is permitted to use it. @@ -96,16 +124,14 @@ If a user is not permitted to use the client scope, no protocol mappers or role include::proc-updating-default-scopes.adoc[] == Scopes explained + +The term _scope_ has multiple meanings within {project_name} and across the OAuth/OIDC specifications. Below is a clarification of the different _scopes_ used in {project_name}: + Client scope:: Client scopes are entities in {project_name} that are configured at the realm level and can be linked to clients. Client scopes are referenced by their name when a request is sent to the {project_name} authorization endpoint with a corresponding value of the *scope* parameter. See the <<_client_scopes_linking, client scopes linking>> section for more details. Role scope mapping:: This is available under the *Scope* tab of a client or client scope. Use *Role scope mapping* to limit the roles that can be used in the access tokens. See the <<_role_scope_mappings, Role Scope Mappings section>> for more details. -ifeval::[{project_community}==true] - Authorization scopes:: The *Authorization Scope* covers the actions that can be performed in the application. See the link:{authorizationguide_link}[Authorization Services Guide] for more details. - -endif::[] - diff --git a/docs/documentation/server_admin/topics/clients/oidc/con-advanced-settings.adoc b/docs/documentation/server_admin/topics/clients/oidc/con-advanced-settings.adoc index fc830e4d9d6a..fd3892c9511e 100644 --- a/docs/documentation/server_admin/topics/clients/oidc/con-advanced-settings.adoc +++ b/docs/documentation/server_admin/topics/clients/oidc/con-advanced-settings.adoc @@ -4,7 +4,7 @@ After completing the fields on the *Settings* tab, you can use the other tabs to perform advanced configuration. ifeval::[{project_community}==true] -For example, you can use the *Permissions* and *Roles* tabs to configure fine-grained authentication for administrators. See <<_fine_grain_permissions, Fine grain admin permissions>>. Also, see the remaining sections in this chapter for other capabilities. +For example, you can use the *Roles* or *Client scopes* tabs to configure client roles defined for the client or manage client scopes for the client. 
Also, see the remaining sections in this chapter for other capabilities. endif::[] == Advanced tab @@ -104,7 +104,7 @@ DPoP binds an access token and a refresh token together with the public part of This type of token is a holder-of-key token. Unlike bearer tokens, the recipient of a holder-of-key token can verify if the sender of the token is legitimate. -If the client switch `OAuth 2.0 DPoP Bound Access Tokens Enabled` is on, the workflow is: +If the client switch `Require Demonstrating Proof of Possession (DPoP) header in token requests` is on, the workflow is: . A token request is sent to the token endpoint in an authorization code flow or hybrid flow. . {project_name} requests a DPoP proof. @@ -113,7 +113,7 @@ If the client switch `OAuth 2.0 DPoP Bound Access Tokens Enabled` is on, the wor If verification fails, {project_name} rejects the token. -If the switch `OAuth 2.0 DPoP Bound Access Tokens Enabled` is off, the client can still send `DPoP` proof in the token request. In that case, {project_name} will verify DPoP proof +If the switch `Require Demonstrating Proof of Possession (DPoP) header in token requests` is off, the client can still send `DPoP` proof in the token request. In that case, {project_name} will verify DPoP proof and will add the thumbprint to the token. But if the switch is off, DPoP binding is not enforced by the {project_name} server for this client. It is recommended to have this switch on if you want to make sure that particular client always uses DPoP binding. diff --git a/docs/documentation/server_admin/topics/clients/oidc/con-audience.adoc b/docs/documentation/server_admin/topics/clients/oidc/con-audience.adoc index 3d3ac7cc0d04..40799ee6dfbf 100644 --- a/docs/documentation/server_admin/topics/clients/oidc/con-audience.adoc +++ b/docs/documentation/server_admin/topics/clients/oidc/con-audience.adoc @@ -2,78 +2,140 @@ = Audience support [role="_abstract"] Typically, the environment where {project_name} is deployed consists of a set of _confidential_ or _public_ client applications that use {project_name} for authentication. +These clients are _frontend clients_, which may directly redirect user to {project_name} to request browser authentication. The particular client would then receive set of tokens after successful authentication. -_Services_ (_Resource Servers_ in the https://datatracker.ietf.org/doc/html/draft-ietf-oauth-mtls-08#section-4.2[OAuth 2 specification]) are also available that serve requests from client applications and provide resources to these applications. These services require an _Access token_ (Bearer token) to be sent to them to authenticate a request. This token is obtained by the frontend application upon login to {project_name}. +_Services_ (_Resource Servers_ in the https://datatracker.ietf.org/doc/html/draft-ietf-oauth-mtls-08#section-4.2[OAuth 2 specification]) are also available that serve requests from client applications and provide resources to these applications. +These services require an _Access token_ (Bearer token) to be sent to them from _frontend application_ or from other service to authenticate a request. -In the environment where trust among services is low, you may encounter this scenario: +The care must be taken to make sure that access tokens have limited privileges and the particular access token cannot be misused by the service to access other third-party services. +In the environment where trust among services is low, you may encounter this example scenario: -. 
A frontend client application requires authentication against {project_name}. +. A frontend client application `frontend-client` requires authentication against {project_name}. . {project_name} authenticates a user. -. {project_name} issues a token to the application. +. {project_name} issues a token to the application `frontend-client`. -. The application uses the token to invoke an untrusted service. +. The `frontend-client` application uses the token to invoke a service `service1`. -. The untrusted service returns the response to the application. However, it keeps the applications token. +. The `service1` service returns the response to the application. But assume that this service will try to misuse the token and keep it for the further use. -. The untrusted service then invokes a trusted service using the applications token. This results in broken security as the untrusted service misuses the token to access other services on behalf of the client application. +. The `service1` then invokes another service `service2` using the applications token, which was previously sent to it. The `service2` does not check that token was not supposed to be +used to invoke it and it will serve the request and return successful response. This results in broken security as the `service1` misused the token to access other services on behalf of the client application `frontend-client`. -This scenario is unlikely in environments with a high level of trust between services but not in environments where trust is low. In some environments, this workflow may be correct as the untrusted service may have to retrieve data from a trusted service to return data to the original client application. +This scenario is unlikely in environments with a high level of trust between services but not in environments where trust is low. -An unlimited audience is useful when a high level of trust exists between services. Otherwise, the audience should be limited. You can limit the audience and, at the same time, allow untrusted services to retrieve data from trusted services. In this case, ensure that the untrusted service and the trusted service are added as audiences to the token. +To prevent any misuse of the access token, the access token can contain the claim `aud`, which represents the audience. The claim `aud` should typically represent client ids of all services where the token +is supposed to be used. In the environments with low trust among services, it is recommended to: -To prevent any misuse of the access token, limit the audience on the token and configure your services to verify the audience on the token. The flow will change as follows: +* Limit the audience on the token to make sure that access tokens contain just limited amount of audiences. -. A frontend application authenticates against {project_name}. +* Configure your services to verify the audience on the token. + +To prevent `service1` from the example above to misuse the token, the secure variant of the flow may instead look like this: + +. A frontend application `frontend-client` authenticates against {project_name}. . {project_name} authenticates a user. -. {project_name} issues a token to the application. The application knows that it will need to invoke an untrusted service so it places *scope=* in the authentication request sent to {project_name} (see <<_client_scopes, Client Scopes section>> for more details about the _scope_ parameter). +. {project_name} issues a token to the `frontend-client` application. 
The `frontend-client` knows that it will need to invoke `service1` so it places `scope=service1-scope` in the authentication request sent to {project_name}. +The scope `service1-scope` is a <<_client_scopes,Client scope>>, which may need to be created by administrator. In the <<_audience_setup,sections below>> there are some options how to setup such a client scope. +The token claim will look like: + -The token issued to the application contains a reference to the untrusted service in its audience (*"audience": [ "" ]*) which declares that the client uses this access token to invoke the untrusted service. +[source,json] +---- +"aud": "service1" +---- + -.The untrusted service serves the request to the client application but also keeps the token. +This declares that the client can use this access token to invoke the `service1`. -. The untrusted service invokes a trusted service with the token. Invocation is not successful because the trusted service checks the audience on the token and find that its audience is only for the untrusted service. This behavior is expected and security is not broken. +. The `frontend-client` application uses the token to invoke a service `service1`. -If the client wants to invoke the trusted service later, it must obtain another token by reissuing the SSO login with *scope=*. The returned token will then contain the trusted service as an audience: +. The `service1` serves the request to the client application `frontend-application`. But assume that this service will try to misuse the token and keep it for the further use. +. The `service1` will then try to invoke a `service2` with the token. Invocation is not successful because the `service2` service checks the audience on the token and find that its audience is only for the `service1`. Hence `service2` will reject the request and will return an error to `service1`. This behavior is expected and security is not broken. + +== Ability for the service to call another service + +In some environments, it may be desired that the `service1` may have to retrieve additional data from a `service2` to return data to the original client application `frontend-client`. In order to make this +possible to work, there are few possibilities: + +* Make sure that initial access token issued to `frontend-client` will contain both `service1` and `service2` as audiences. Assuming that there are proper client scopes set, the `frontend-client` can possibly use +the `scope=service1-scope service2-scope` as a value of the `scope` parameter. The issued token would then contain the `aud` claim like: ++ [source,json] ---- -"audience": [ "" ] +"aud": [ "service1", "service2" ] ---- -Use this value to invoke the **. ++ +Such access token can be used to invoke both `service1` or `service2`. Hence `service1` will be able to successfully use such token to invoke `service2` to retrieve additional data. +* The previous approach with both services in the token audience allows that `service1` is allowed to invoke `service2`. However it means that `frontend-client` can also directly use his access token to invoke `service2`. +This may not be desired in some cases. You may want `service1` to be able to invoke `service2`, but at the same time, you do not want `frontend-client` to be able to directly invoke `service2`. The solution +to such scenario might be the use of the link:{securing_apps_token_exchange_link}[Token exchange]. In that case, the initial token would still have only `service1` as an audience. 
+However once the token is sent to `service1`, the `service1` may send Token exchange request to exchange the token for another token, which would have `service2` as an audience. Please see +the link:{securing_apps_token_exchange_link}[{securing_apps_token_exchange_name}] for the details on how to use it. + +[[_audience_setup]] == Setup When setting up audience checking: * Ensure that services are configured to check audience on the access token sent to them. This may be done in a way specific to your client OIDC adapter, which you are using to secure your OIDC client application. -* Ensure that access tokens issued by {project_name} contain all necessary audiences. Audiences can be added using the client roles as described in the <<_audience_resolve, next section>> or hardcoded. See <<_audience_hardcoded, Hardcoded audience>>. +* Ensure that access tokens issued by {project_name} contain all necessary audiences. ++ +Audiences can be added to the token by two ways: ++ +** Using the client roles as described in the <<_audience_resolve, Audience resolve section>>. ++ +** Hardcoded audience as described in the <<_audience_hardcoded, Hardcoded audience section>>. [[_audience_resolve]] -== Automatically add audience +== Automatically add audience based on client roles -An _Audience Resolve_ protocol mapper is defined in the default client scope _roles_. The mapper checks for clients that have at least one client role available for the current token. The client ID of each client is then added as an audience, which is useful +An _Audience Resolve_ protocol mapper is defined in the default client scope _roles_. The mapper checks for clients that have at least one client role available for the current token. The client ID of each such client is then added as an audience, which is useful if your service clients rely on client roles. Service client could be usually a client without any flows enabled, which may not have any tokens issued directly to itself. It represents an OAuth 2 _Resource Server_. -For example, for a service client and a confidential client, -you can use the access token issued for the confidential client to invoke the service client REST service. The service client will be automatically added as an audience to the access token issued for the confidential client if the following are true: +The <<_oidc_token_role_mappings,Token role mappings section>> contains the details about how are client roles added into the token. Please also see the example below. -* The service client has any client roles defined on itself. +=== Example - token role mappings and audience claim -* Target user has at least one of those client roles assigned. +Here are the example steps how to use the client roles to make `aud` claim added to the token: -* Confidential client has the role scope mappings for the assigned role. +. Create a <> `service1`. It may be possible to disable *Standard flow* or any other flows for this client +as it is a service client, which may never directly authenticate by itself. The possible exception might be *Standard Token Exchange* switch if needed as described above. -[NOTE] -==== -If you want to ensure that the audience is not added automatically, do not configure role scope mappings directly on the confidential client. Instead, you can create a dedicated client scope that contains the role scope mappings for the client roles of your dedicated client scope. +. Go to *Roles* tab of that client and create client role `service1-role`. 
-Assuming that the client scope is added as an optional client scope to the confidential client, the client roles and the audience will be added to the token if explicitly requested by the *scope=* parameter.
-====
+. Create a user `john` in the same realm and assign him the client role `service1-role` of the client `service1` created in the previous step.
+<> contains details on how to do this.
+
+. Create a client scope named `service1-scope`. It can be marked with *Include in token scope* set to *ON*. See <<_client_scopes,this section>> for details on how to create and configure a new client scope.
+
+. Go to the *Scope* tab of the `service1-scope` and add the role `service1-role` of the client `service1` to the <<_role_scope_mappings,Role scope mappings>> of this client scope.
+
+. Create another client `frontend-client` in the realm.
+
+. Click the *Client scopes* tab of this client, select the first dedicated client scope `frontend-client-dedicated`, then go to the *Scope* tab and disable the *Full scope allowed* switch.
+
+. Go back to the *Client scopes* tab of this client, click *Add client scope*, and link the `service1-scope` as *Optional*. See the <<_client_scopes_linking, Client Scopes Linking section>> for more details.
+
+. Click the *Evaluate* sub-tab in the *Client scopes* tab, as described in <<_client_scopes_evaluate,this section>>. When you fill in the user `john` and open the *Generated access token* sub-tab, you can see that
+there is no `aud` claim because there are no client roles in the generated example token. However, when you also add the scope `service1-scope` to the *Scope* field, you can see the client
+role `service1-role`, because it is in the *Role scope mappings* of the `service1-scope` and also in the role mappings of the user `john`. As a result, the `aud` claim will also contain `service1`.
+
+.Audience resolve example
+image:images/audience_resolving_evaluate.png[]
+
+If you want the `service1` audience to always be applied to the tokens issued to the `frontend-client` client (without using the parameter `scope=service1-scope`), you can instead do either of the following:
+
+* Assign the `service1-scope` as a *Default* client scope rather than an *Optional* one.
+
+* Add the role scope mapping of the `service1-role` directly to the <<_client_scopes_dedicated,Dedicated client scope>> of the client. In this case, you will not need the `service1-scope` at all.
+
+Note that because this approach is based on client roles, it also requires that the user (user `john` in the example above) has at least one client role of the client `service1` assigned. If no
+client roles are assigned, the audience `service1` will not be included. If you want the audience to be included regardless of client roles, see the <<_audience_hardcoded,Hardcoded audience>> section instead.

[NOTE]
====
@@ -91,28 +153,31 @@ You can use any custom value, for example a URL, if you want to use a different

You can add the protocol mapper directly to the frontend client. If the protocol mapper is added directly, the audience will always be added as well.

-For more control over the protocol mapper, you can create the protocol mapper on the dedicated client scope, which will be called for example *good-service*.
+For more control over the protocol mapper, you can create the protocol mapper on a dedicated client scope, which will be called, for example, *service2*.
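+
+As an alternative to the Admin Console steps shown below, a client scope with a hardcoded _Audience_ mapper can also be created through the Admin REST API. The following is only a minimal sketch under a few assumptions: the realm is called `myrealm`, `$ACCESS_TOKEN` holds an admin access token, and the built-in audience mapper is referenced by its provider ID `oidc-audience-mapper`. Create a file `service2-scope.json`:
+
+[source,json]
+----
+{
+  "name": "service2-scope",
+  "protocol": "openid-connect",
+  "attributes": {
+    "include.in.token.scope": "true"
+  },
+  "protocolMappers": [
+    {
+      "name": "service2-audience",
+      "protocol": "openid-connect",
+      "protocolMapper": "oidc-audience-mapper",
+      "config": {
+        "included.client.audience": "service2",
+        "access.token.claim": "true",
+        "id.token.claim": "false"
+      }
+    }
+  ]
+}
+----
+
+Then import it:
+
+[source,bash]
+----
+curl -X POST "https://localhost:8443/admin/realms/myrealm/client-scopes" \
+  -H "Authorization: Bearer $ACCESS_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d @service2-scope.json
+----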
-.Audience protocol mapper
-image:images/audience_mapper.png[]
+Here are the example steps for the hardcoded audience:

-* From the <<_client_installation, Client details tab>> of the *good-service* client, you can generate the adapter configuration and confirm that _verify-token-audience_ is set to *true*. This action forces the adapter to verify the audience if you use this configuration.
+. Create a client `service2`.

-* You need to ensure that the confidential client is able to request *good-service* as an audience in its tokens.
-+
-On the confidential client:
-+
-. Click the _Client Scopes_ tab.
-. Assign *good-service* as an optional (or default) client scope.
-+
-See <<_client_scopes_linking, Client Scopes Linking section>> for more details.
+. Create a client scope `service2-scope`.

-* You can optionally <<_client_scopes_evaluate, Evaluate Client Scopes>> and generate an example access token. *good-service* will be added to the audience of the generated access token if *good-service* is included in the _scope_ parameter, when you assigned it as an optional client scope.
+. In the *Mappers* tab of that client scope, select *Configure a new mapper* and select *Audience*.

-* In your confidential client application, ensure that the _scope_ parameter is used. The value *good-service* must be included when you want to issue the token for accessing *good-service*.
+. Select `service2` as the *Included Client Audience* and save the mapper.
+
-See:
+.Audience protocol mapper
+image:images/audience_mapper.png[]
+
-** *Keycloak JavaScript adapter* in the link:{securing_apps_link}[securing apps] section if your application uses the javascript adapter.
+. Link the newly created client scope with a client. For example, it can be linked as an *Optional* client scope to the client `frontend-client` created in the <<_audience_resolve,previous example>>.
+
+. You can optionally <<_client_scopes_evaluate, Evaluate Client Scopes>> for the client where the client scope was linked (for example, `frontend-client`) and generate an example access token.
+The audience `service2` will be added to the generated access token if `service2-scope` is included in the _scope_ parameter, assuming you linked it as an optional client scope.
+
+In your confidential client application, ensure that the _scope_ parameter is used. A value like _scope=service2-scope_ must be included when you want to issue a token for accessing `service2`.
+
+If your application uses the JavaScript adapter, see the link:{securing_apps_base_link}/javascript-adapter[{project_name} JavaScript adapter] section for how to send the _scope_ parameter with the desired value.
+
+If you prefer not to include the `scope` parameter in your requests, you can instead link the `service2-scope` as a *Default* client scope or configure this mapper on the dedicated client scope of the client.
+This is useful if you want the audience to be applied to all authentication requests of the OIDC client `frontend-client`.

-NOTE: Both the _Audience_ and _Audience Resolve_ protocol mappers add the audiences to the access token only, by default. The ID Token typically contains only a single audience, the client ID for which the token was issued, a requirement of the OpenID Connect specification. However, the access token does not necessarily have the client ID, which was the token issued for, unless the audience mappers added it.
+NOTE: Both the _Audience_ and _Audience Resolve_ protocol mappers add the audiences to the access token only, by default.
The ID Token typically contains only a single audience, the client ID for which the token was issued, a requirement of the OpenID Connect specification. However, the access token does not necessarily have the client ID, which was the token issued for, unless the _Audience_ mapper added it. diff --git a/docs/documentation/server_admin/topics/clients/oidc/con-basic-settings.adoc b/docs/documentation/server_admin/topics/clients/oidc/con-basic-settings.adoc index 442ce36ea066..571c8bdf3291 100644 --- a/docs/documentation/server_admin/topics/clients/oidc/con-basic-settings.adoc +++ b/docs/documentation/server_admin/topics/clients/oidc/con-basic-settings.adoc @@ -66,6 +66,8 @@ For client-side clients that perform browser logins. As it is not possible to en *Service account roles*:: If enabled, this client can authenticate to {project_name} and retrieve access token dedicated to this client. In terms of OAuth2 specification, this enables support of `Client Credentials Grant` for this client. +*Standard Token Exchange*:: If enabled, this client can use the link:{securing_apps_token_exchange_link}#_standard-token-exchange[Standard token exchange]. + *Auth 2.0 Device Authorization Grant*:: If enabled, this client can use the OIDC xref:con-oidc-auth-flows_server_administration_guide[Device Authorization Grant]. *OIDC CIBA Grant*:: If enabled, this client can use the OIDC xref:con-oidc-auth-flows_{context}[Client Initiated Backchannel Authentication Grant]. diff --git a/docs/documentation/server_admin/topics/clients/oidc/con-confidential-client-credentials.adoc b/docs/documentation/server_admin/topics/clients/oidc/con-confidential-client-credentials.adoc index ab28bb5c5923..251a4b18e00d 100644 --- a/docs/documentation/server_admin/topics/clients/oidc/con-confidential-client-credentials.adoc +++ b/docs/documentation/server_admin/topics/clients/oidc/con-confidential-client-credentials.adoc @@ -19,6 +19,8 @@ image:images/client-credentials-jwt.png[Signed JWT] *Signed JWT* is "Signed JSON Web Token". +In this authenticator you can enforce the *Signature algorithm* used by the client (any algorithm is valid by default) and the *Max expiration* allowed for the JWT token (tokens received after this period will not be accepted because they are too old, note that tokens should be issued right before the authentication, 60 seconds by default). + When choosing this credential type you will have to also generate a private key and certificate for the client in the tab `Keys`. The private key will be used to sign the JWT, while the certificate is used by the server to verify the signature. .Keys tab @@ -63,6 +65,8 @@ If you select this option, you can use a JWT signed by client secret instead of The client secret will be used to sign the JWT by the client. +Like in the *Signed JWT* authenticator you can configure the *Signature algorithm* and the *Max expiration* for the JWT token. + *X509 Certificate* {project_name} will validate if the client uses proper X509 certificate during the TLS Handshake. diff --git a/docs/documentation/server_admin/topics/clients/oidc/con-token-role-mappings.adoc b/docs/documentation/server_admin/topics/clients/oidc/con-token-role-mappings.adoc new file mode 100644 index 000000000000..994539cdfb26 --- /dev/null +++ b/docs/documentation/server_admin/topics/clients/oidc/con-token-role-mappings.adoc @@ -0,0 +1,80 @@ +[[_oidc_token_role_mappings]] += Role mappings in the token + +When a user authenticates, there are some roles that are added to the access token. 
By default, the <> are added to the access +token into the `realm_access` claim. The <> are added by default to the `resource_access` claim. + +The roles added to the token are an intersection of: + +* Roles, that are <<_oidc_token_role_mappings_user_roles,assigned to the user>>. + +* <<_role_scope_mappings,Role scope mappings>> of the roles that the client is permitted to access + +[[_oidc_token_role_mappings_user_roles]] +== Roles assigned to the user + +Roles assigned to the user can be defined in the Role mappings as described in <>. Few details: + +* In case that a user is a member of some <>, then all the roles of these groups are also applied. + +* In case that a role is a <<_composite-roles,composite role>>, the child roles of the composite role are also applied. In the token, the list of the roles is expanded and would contain all the roles. + +* In case that the authenticated user is not a normal user, but a <<_service_accounts,Service account>>, which represents a client, then the service account roles are used. The service account roles are defined +in the tab *Service accounts roles* of the particular client. + +== Role protocol mappers + +Similarly to other claims, the roles are added to the access token issued for the client by the dedicated <<_protocol-mappers,Protocol mappers>>. There is a <<_client_scopes_protocol,Built-in client scope *roles*>> +defined in the realm. Since it is a <>, it is defined by default as a <<_client_scopes_linking,Default client scope>> for every realm client. +You can see this client scope in the admin console by looking at the tab *Client scopes* and then looking for the *roles* client scope. This client scope contains these protocol mappers by default: + +* The protocol mapper *realm roles* - This protocol mapper is used to add the realm roles to the token claim. By default, the configuration looks like this: + +.Realm roles mapper +image:images/mapper-oidc-realm-roles.png[] + +* The protocol mapper *client roles* - This protocol mapper is used to add the client roles to the token claim. By default, the configuration looks like this: + +.Client roles mapper +image:images/mapper-oidc-client-roles.png[] + +* The protocol mapper *audience resolve* - This protocol mapper is used to fill the `aud` claim in the access token based on the applied client roles. The details about this mapper are in the <<_audience_resolve,Audience resolve section>>. + +As you can see in the configuration of realm roles and client roles mappers, it is possible to configure: + +* If roles are added just to the access token or also to other tokens, like for example the ID token. By default, roles are added to the access token and to the introspection endpoint. + +* What are the claims where the roles would be added. By default, the realm roles are added to the `realm_access` claim. So, for example, the claim in the JWT token containing 2 realm roles `role1` and `role2` will look similar to this: ++ +[source,json] +---- +"realm_access": { + "roles": [ "role1", "role2" ] +} +---- ++ +The client roles are added to the `resource_access` token claim by default. 
This claim will look like this in the token, which contains +client roles `manage-account` and `manage-account-links` of client `account` and client role `target-client1-role` of the client `target-client1`: ++ +[source,json] +---- +"resource_access": { + "target-client1": { + "roles": [ "target-client1-role" ] + }, + "account": { + "roles": [ "manage-account", "manage-account-links" ] + } +} +---- + +By adjusting the configuration option *Token claim name* of the role protocol mappers, it is possible to specify that these roles will be added to the token in the configured claim. + +If you want to update the role claims just for one specific client (For example, client `foo` expects the realm roles in the claim `my-realm-roles` instead of the claim `realm_access`), then it is +possible to remove the default client scope *roles* from your client and instead configure the realm/client protocol mapper in the <<_client_scopes_dedicated,dedicated client scope>> of your client. + +== Example + +The <<_audience_resolve,Audience documentation>> contains a more detailed example, which covers some details about the role mappings and about the audience (Claim `aud`) added to the token. Also, it can be +useful to try the <<_client_scopes_evaluate,Client scopes evaluation>> to see what are the effective scopes, protocol mappers and role scope mappings used when issuing the token for the particular client +and how the JWT tokens would look like for the particular combination of user, client, and applied client scopes. diff --git a/docs/documentation/server_admin/topics/clients/oidc/proc-using-a-service-account.adoc b/docs/documentation/server_admin/topics/clients/oidc/proc-using-a-service-account.adoc index f78124a02316..0e3dc03225b3 100644 --- a/docs/documentation/server_admin/topics/clients/oidc/proc-using-a-service-account.adoc +++ b/docs/documentation/server_admin/topics/clients/oidc/proc-using-a-service-account.adoc @@ -8,16 +8,16 @@ Each OIDC client has a built-in _service account_. Use this _service account_ to .Prerequisites .Procedure -. Click *Clients* in the menu. +. Click *Clients* in the menu. . Select your client. . Click the *Settings* tab. . Toggle <<_access-type, Client authentication>> to *On*. -. Select *Service accounts roles*. +. Select *Service accounts roles* checkbox to make sure it is enabled. . Click *Save*. . Configure your <<_client-credentials, client credentials>>. -. Click the *Scope* tab. -. Verify that you have roles or toggle *Full Scope Allowed* to *ON*. -. Click the *Service Account Roles* tab +. Click the *Client Scopes* tab, select the dedicated client scope (usually first client scope in the list, more details <<_client_scopes_dedicated,in this section>>) and select *Scope* tab of the client scope. +. Verify that you have roles or toggle *Full Scope Allowed* to *ON*. Note that this switch is useful only for the development purposes and in the production, it is recommended to disable this switch and properly configure role scopes. The details about this switch are described in <<_role_scope_mappings, this section>> and in <<_oidc_token_role_mappings,this section>>. +. Click the *Service Account Roles* tab of your client . Configure the roles available to this service account for your client. 
Roles from access tokens are the intersection of: @@ -43,6 +43,16 @@ For example, the POST invocation to retrieve a service account can look like thi grant_type=client_credentials ---- +Note that the value of `cHJvZHVjdC1zYS1jbGllbnQ6cGFzc3dvcmQ=` used in the `Authorization` header is Base64 encoded value of clientId and clientSecret +in the format prescribed by the `Authorization: Basic` header. In this example, the client ID is `product-sa-client` and the client secret was `password` and hence the value was obtained for example +by this command in the Unix platform: +[source,bash] +---- +echo 'product-sa-client:password' | base64 +---- +Instead of using the header `Authorization: Basic`, it is also possible to send the credentials as parameters `client_id` and `client_secret` of the POST request. For other client credentials methods, +the format of the parameters would be different as described above. + The response would be similar to this https://datatracker.ietf.org/doc/html/rfc6749#section-4.4.3[Access Token Response] from the OAuth 2.0 specification. [source] @@ -54,9 +64,10 @@ Cache-Control: no-store Pragma: no-cache { - "access_token":"2YotnFZFEjr1zCsicMWpAA", - "token_type":"bearer", - "expires_in":60 + "access_token":"eyJhbGciOiJSUzI1NiIs...", + "token_type":"Bearer", + "expires_in":60, + "scope": "email profile" } ---- diff --git a/docs/documentation/server_admin/topics/clients/oidc/service-accounts.adoc b/docs/documentation/server_admin/topics/clients/oidc/service-accounts.adoc deleted file mode 100644 index 4d1c5e87bfda..000000000000 --- a/docs/documentation/server_admin/topics/clients/oidc/service-accounts.adoc +++ /dev/null @@ -1,57 +0,0 @@ -[[_service_accounts]] - -==== Service Accounts - -Each OIDC client has a built-in _service account_ which allows it to obtain an access token. -This is covered in the OAuth 2.0 specification under <<_client_credentials_grant,Client Credentials Grant>>. -To use this feature you must set the <<_access-type, Access Type>> of your client to `confidential`. When you do this, -the `Service Accounts Enabled` switch is displayed. You need to toggle this switch to ON. Also make sure that you have -configured your <<_client-credentials, client credentials>>. - -To use it you must have registered a valid `confidential` Client and you need to check the switch `Service Accounts Enabled` in {project_name} admin console for this client. -In tab `Service Account Roles` you can configure the roles available to the service account retrieved on behalf of this client. -Remember that you must have the roles available in Role Scope Mappings (tab `Scope`) of this client as well, unless you -have `Full Scope Allowed` on. As in a normal login, roles from access token are the intersection of: - -* Role scope mappings of particular client combined with the role scope mappings inherited from linked client scopes -* Service account roles - -The REST URL to invoke on is `{kc_realms_path}/{realm-name}/protocol/openid-connect/token`. -Invoking on this URL is a POST request and requires you to post the client credentials. -By default, client credentials are represented by clientId and clientSecret of the client in `Authorization: Basic` header, but you can also authenticate the client with a signed JWT assertion or any other custom mechanism for client authentication. -You also need to use the parameter `grant_type=client_credentials` as per the OAuth2 specification. 
- -For example the POST invocation to retrieve a service account can look like this: - -[source] ----- - - POST {kc_realms_path}/demo/protocol/openid-connect/token - Authorization: Basic cHJvZHVjdC1zYS1jbGllbnQ6cGFzc3dvcmQ= - Content-Type: application/x-www-form-urlencoded - - grant_type=client_credentials ----- -The response would be this https://datatracker.ietf.org/doc/html/rfc6749#section-4.4.3[standard JSON document] from the OAuth 2.0 specification. - -[source] ----- - -HTTP/1.1 200 OK -Content-Type: application/json;charset=UTF-8 -Cache-Control: no-store -Pragma: no-cache - -{ - "access_token":"2YotnFZFEjr1zCsicMWpAA", - "token_type":"bearer", - "expires_in":60 -} ----- - -There is the only access token returned by default. There is no refresh token returned and there is also no user session created -on the {project_name} side upon successful authentication by default. Due to the lack of a refresh token, there is a need to re-authenticate when access token expires, -however this does not mean any additional overhead on {project_name} server side due the fact that sessions are not created by default. - -Due to this, there is no need for logout, however issued access tokens can be revoked by sending request to the OAuth2 Revocation Endpoint described -in the <<_oidc-endpoints, OpenID Connect Endpoints>> section. diff --git a/docs/documentation/server_admin/topics/clients/proc-evaluating-client-scopes.adoc b/docs/documentation/server_admin/topics/clients/proc-evaluating-client-scopes.adoc index a58e77447dcd..47fa358c438d 100644 --- a/docs/documentation/server_admin/topics/clients/proc-evaluating-client-scopes.adoc +++ b/docs/documentation/server_admin/topics/clients/proc-evaluating-client-scopes.adoc @@ -10,12 +10,15 @@ The *Mappers* tab contains the protocol mappers and the *Scope* tab contains the This will also show you the value of the *scope* parameter. This parameter needs to be sent from the application to the {project_name} OpenID Connect authorization endpoint. -.Evaluating client scopes -image:images/client-scopes-evaluate.png[] - [NOTE] ==== -To send a custom value for a *scope* parameter from your application, see the *Keycloak JavaScript adapter* in the link:{securing_apps_link}[securing apps] section, for javascript adapters. +If your application uses the {securing_apps_base_link}/javascript-adapter[{project_name} JavaScript adapter], see its section to learn how to send the *scope* parameter with the desired value. ==== +You can also simulate how the access token, ID token, or UserInfo response issued to this client looks for a particular selected user and for a specific value of the `audience` parameter. Note +that the `audience` parameter is currently only supported for the token exchange grant. It is recommended to leave it empty when simulating any other grant. + +.Evaluating client scopes +image:images/client-scopes-evaluate.png[] + All examples are generated for the particular user and issued for the particular client, with the specified value of the *scope* parameter. The examples include all of the claims and role mappings used. 
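+
+For illustration, an authorization request carrying a custom *scope* value could look like the following sketch. The realm name, client ID, and redirect URI are placeholders; `service1-scope` stands for any client scope whose claims you want to evaluate:
+
+[source]
+----
+GET {kc_realms_path}/myrealm/protocol/openid-connect/auth
+    ?client_id=frontend-client
+    &response_type=code
+    &redirect_uri=https%3A%2F%2Fapp.example.com%2Fcallback
+    &scope=openid%20service1-scope
+----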
diff --git a/docs/documentation/server_admin/topics/clients/proc-updating-default-scopes.adoc b/docs/documentation/server_admin/topics/clients/proc-updating-default-scopes.adoc index d3cd7a919808..58f62926e7f6 100644 --- a/docs/documentation/server_admin/topics/clients/proc-updating-default-scopes.adoc +++ b/docs/documentation/server_admin/topics/clients/proc-updating-default-scopes.adoc @@ -3,15 +3,7 @@ [role="_abstract"] Use *Realm Default Client Scopes* to define sets of client scopes that are automatically linked to newly created clients. -.Procedure -. Click the *Client Scopes* tab for the client. -ifeval::[{project_product}==true] -. Click *Default Client Scopes*. -endif::[] - -From here, select the client scopes that you want to add as *Default Client Scopes* to newly created clients and *Optional Client Scopes*. - -.Default client scopes -image:images/client-scopes-default.png[] +To see the realm default client scopes, click the *Client Scopes* tab on the left side of the admin console. In the *Assigned type* column, you can specify whether a particular client scope should be added as +a *Default Client Scope* or an *Optional Client Scope* to newly created clients. See <<_client_scopes_linking, this section>> for details on what _default_ and _optional_ client scopes are. When a client is created, you can unlink the default client scopes, if needed. This is similar to removing <<_default_roles, Default Roles>>. diff --git a/docs/documentation/server_admin/topics/events/admin.adoc b/docs/documentation/server_admin/topics/events/admin.adoc index 73fc84912d86..0461946e3c7d 100644 --- a/docs/documentation/server_admin/topics/events/admin.adoc +++ b/docs/documentation/server_admin/topics/events/admin.adoc @@ -35,9 +35,9 @@ You can now view admin events. .Admin events image:images/admin-events.png[Admin events] -When the `Include Representation` switch is ON, it can lead to storing a lot of information in the database. You can set a maximum length of the representation by using the `--spi-events-store-jpa-max-field-length` argument. This setting is useful if you want to adhere to the underlying storage limitation. For example: +When the `Include Representation` switch is ON, it can lead to storing a lot of information in the database. You can set a maximum length of the representation by using the `--spi-events-store--jpa--max-field-length` argument. This setting is useful if you want to adhere to the underlying storage limitation. For example: [source,bash] ---- -kc.[sh|bat] --spi-events-store-jpa-max-field-length=2500 +kc.[sh|bat] --spi-events-store--jpa--max-field-length=2500 ---- \ No newline at end of file diff --git a/docs/documentation/server_admin/topics/events/login.adoc b/docs/documentation/server_admin/topics/events/login.adoc index 534842298994..0d6762ed59b5 100644 --- a/docs/documentation/server_admin/topics/events/login.adoc +++ b/docs/documentation/server_admin/topics/events/login.adoc @@ -184,7 +184,7 @@ To change the log level used by the Logging Event listener, add the following: [source,bash] ---- -bin/kc.[sh|bat] start --spi-events-listener-jboss-logging-success-level=info --spi-events-listener-jboss-logging-error-level=error +bin/kc.[sh|bat] start --spi-events-listener-jboss-logging-success-level=info --spi-events-listener--jboss-logging--error-level=error ---- The valid values for log levels are `debug`, `info`, `warn`, `error`, and `fatal`. @@ -200,6 +200,11 @@ The Email Event Listener sends a message to the user's email address when an eve * Update Credential. 
* Remove Credential. +Below are the optional events you can configure: + +* User disabled by permanent lockout. +* User disabled by temporary lockout. + The following conditions need to be met for an email to be sent: * User has an email address. @@ -221,10 +226,15 @@ To enable the Email Listener: .Event listeners image:images/event-listeners.png[Event listeners] -You can exclude events by using the `--spi-events-listener-email-exclude-events` argument. For example: +You can exclude events by using the `--spi-events-listener--email--exclude-events` argument. For example: [source,bash] ---- -kc.[sh|bat] --spi-events-listener-email-exclude-events=UPDATE_CREDENTIAL,REMOVE_CREDENTIAL +kc.[sh|bat] --spi-events-listener--email--exclude-events=UPDATE_CREDENTIAL,REMOVE_CREDENTIAL ---- +To enable optional events, use the following command: +[source,bash] +---- +kc.[sh|bat] --spi-events-listener--email--include-events=USER_DISABLED_BY_TEMPORARY_LOCKOUT_ERROR,USER_DISABLED_BY_PERMANENT_LOCKOUT +---- diff --git a/docs/documentation/server_admin/topics/identity-broker/configuration.adoc b/docs/documentation/server_admin/topics/identity-broker/configuration.adoc index c20e05de98f0..fea974e1b62e 100644 --- a/docs/documentation/server_admin/topics/identity-broker/configuration.adoc +++ b/docs/documentation/server_admin/topics/identity-broker/configuration.adoc @@ -54,6 +54,10 @@ Although each type of identity provider has its configuration options, all share |Trust Email |When *ON*, {project_name} trusts email addresses from the identity provider. If the realm requires email validation, users that log in from this identity provider do not need to perform the email verification process. +If the target identity provider supports email verification and advertises this information when returning the user profile information, the email of the federated user will be (un)marked as verified. +For instance, an OpenID Connect Provider returning a `email_verified` claim in their ID Tokens. +Note that this setting will set the email as verified when the user is federated for the first time and on subsequent logins +through the broker if the sync mode is set to `FORCE`. |GUI Order |The sort order of the available identity providers on the login page. diff --git a/docs/documentation/server_admin/topics/identity-broker/oauth2.adoc b/docs/documentation/server_admin/topics/identity-broker/oauth2.adoc new file mode 100644 index 000000000000..05b63dc24322 --- /dev/null +++ b/docs/documentation/server_admin/topics/identity-broker/oauth2.adoc @@ -0,0 +1,90 @@ + +[[_identity_broker_oauth]] +=== OAuth v2 identity providers + +{project_name} brokers identity providers based on the OAuth v2 protocol. These identity providers (IDPs) must support the xref:con-oidc-auth-flows_{context}[Authorization Code Flow] defined in the specification to authenticate users and authorize access. + + +.Procedure +. Click *Identity Providers* in the menu. +. From the `Add provider` list, select `OAuth v2`. ++ +. Enter your initial configuration options. See <<_general-idp-config, General IDP Configuration>> for more information about configuration options. ++ +.OAuth2 settings +|=== +|Configuration|Description + +|Authorization URL +|The authorization URL endpoint. + +|Token URL +|The token URL endpoint. + +|User Info URL +|An endpoint from where information about the user will be fetched from. When invoking this endpoint, {project_name} will send +the request with the access token issued by the identity provider as a bearer token. 
As a result, it expects the response to be a +JSON document with the claims that should be used to obtain user profile information like ID, username, email, and first and last names. + +|Client Authentication +|Defines the Client Authentication method {project_name} uses with the Authorization Code Flow. In the case of JWT signed with a private key, {project_name} uses the realm private key. In the other cases, define a client secret. See the https://openid.net/specs/openid-connect-core-1_0.html#ClientAuthentication[Client Authentication specifications] for more information. + +|Client ID +|A realm acting as an OIDC client to the external IDP. The realm must have an OIDC client ID if you use the Authorization Code Flow to interact with the external IDP. + +|Client Secret +|Client secret from an external <<_vault-administration,vault>>. This secret is necessary if you are using the Authorization Code Flow. + +|Client Assertion Signature Algorithm +|Signature algorithm to create JWT assertion as client authentication. +In the case of JWT signed with private key or Client secret as jwt, it is required. If no algorithm is specified, the following algorithm is adapted. `RS256` is adapted in the case of JWT signed with private key. `HS256` is adapted in the case of Client secret as jwt. + +|Client Assertion Audience +|The audience to use for the client assertion. The default value is the IDP's token endpoint URL. + +|Default Scopes +|A space separated list of scopes {project_name} sends with the authentication request. + +|Prompt +|The prompt parameter in the OIDC specification. Through this parameter, you can force re-authentication and other options. See the specification for more details. + +|Accepts prompt=none forward from client +|Specifies if the IDP accepts forwarded authentication requests containing the `prompt=none` query parameter. If a realm receives an auth request with `prompt=none`, the realm checks if the user is currently authenticated and returns a `login_required` error if the user has not logged in. When {project_name} determines a default IDP for the auth request (using the `kc_idp_hint` query parameter or having a default IDP for the realm), you can forward the auth request with `prompt=none` to the default IDP. The default IDP checks the authentication of the user there. Because not all IDPs support requests with `prompt=none`, {project_name} uses this switch to indicate that the default IDP supports the parameter before redirecting the authentication request. + +If the user is unauthenticated in the IDP, the client still receives a `login_required` error. If the user is authentic in the IDP, the client can still receive an `interaction_required` error if {project_name} must display authentication pages that require user interaction. This authentication includes required actions (for example, password change), consent screens, and screens set to display by the `first broker login` flow or `post broker login` flow. + +|Requires short state parameter +|This switch needs to be enabled if identity provider does not support long value of the `state` parameter sent in the initial OAuth2 authorization request (EG. more than 100 characters). In this case, {project_name} will try to make shorter `state` parameter and may omit some client data to be sent in the initial request. This may result in the limited functionality in some very corner case scenarios (EG. 
in case that IDP redirects to {project_name} with the error in the OAuth2 authorization response, {project_name} might need to display error page instead of being able to redirect to the client in case that login session is expired). + +|=== + +After the user authenticates to the identity provider and is redirected back to {project_name}, the broker will fetch the user profile information from the endpoint defined in the `User Info URL` setting. For that, +{project_name} will invoke that endpoint using the access token issued by the identity provider as a bearer token. Even though the OAuth2 standard supports access tokens using a JWT format, this broker assumes access tokens are opaque and that user profile information should be obtained from a separate endpoint. + +In order to map the claims from the JSON document returned by the user profile endpoint, you might want to set the following settings so that they are mapped to user attributes when federating the user: + +.User profile claims +|=== +|Configuration|Description + +|ID Claim +|The name of the claim from the JSON document returned by the user profile endpoint representing the user's unique identifier. If not provided, defaults to `sub`. + +|Username Claim +|The name of the claim from the JSON document returned by the user profile endpoint representing the user's username. If not provided, defaults to `preferred_username`. + +|Email Claim +|The name of the claim from the JSON document returned by the user profile endpoint representing the user's email. If not provided, defaults to `email`. + +|Name Claim +|The name of the claim from the JSON document returned by the user profile endpoint representing the user's full name. If not provided, defaults to `name`. + +|Given name Claim +|The name of the claim from the JSON document returned by the user profile endpoint representing the user's given name. If not provided, defaults to `given_name`. + +|Family name Claim +|The name of the claim from the JSON document returned by the user profile endpoint representing the user's family name. If not provided, defaults to `family_name`. + +|=== + +You can import all this configuration data by providing a URL or file that points to the Authorization Server Metadata. If you connect to a {project_name} external IDP, you can import the IDP settings from `{kc_realms_path}/{realm-name}/.well-known/openid-configuration`. This link is a JSON document describing metadata about the IDP. diff --git a/docs/documentation/server_admin/topics/identity-broker/oidc.adoc b/docs/documentation/server_admin/topics/identity-broker/oidc.adoc index d152e5912bad..439741dc5849 100644 --- a/docs/documentation/server_admin/topics/identity-broker/oidc.adoc +++ b/docs/documentation/server_admin/topics/identity-broker/oidc.adoc @@ -63,6 +63,9 @@ In the case of JWT signed with private key or Client secret as jwt, it is requir If the user is unauthenticated in the IDP, the client still receives a `login_required` error. If the user is authentic in the IDP, the client can still receive an `interaction_required` error if {project_name} must display authentication pages that require user interaction. This authentication includes required actions (for example, password change), consent screens, and screens set to display by the `first broker login` flow or `post broker login` flow. +|Requires short state parameter +|This switch needs to be enabled if identity provider does not support long value of the `state` parameter sent in the initial OIDC authentication request (EG. 
more than 100 characters). In this case, {project_name} will try to make shorter `state` parameter and may omit some client data to be sent in the initial request. This may result in the limited functionality in some very corner case scenarios (EG. in case that IDP redirects to {project_name} with the error in the OIDC authentication response, {project_name} might need to display error page instead of being able to redirect to the client in case that login session is expired). + |Validate Signatures |Specifies if {project_name} verifies signatures on the external ID Token signed by this IDP. If *ON*, {project_name} must know the public key of the external OIDC IDP. For performance purposes, {project_name} caches the public key of the external OIDC identity provider. @@ -82,4 +85,4 @@ If the user is unauthenticated in the IDP, the client still receives a `login_re You can import all this configuration data by providing a URL or file that points to OpenID Provider Metadata. If you connect to a {project_name} external IDP, you can import the IDP settings from `{kc_realms_path}/{realm-name}/.well-known/openid-configuration`. This link is a JSON document describing metadata about the IDP. -If you want to use https://datatracker.ietf.org/doc/html/rfc7516[Json Web Encryption (JWE)] ID Tokens or UserInfo responses in the provider, the IDP needs to know the public key to use with {project_name}. The provider uses the <> defined for the different encryption algorithms to decrypt the tokens. {project_name} provides a standard xref:con-server-oidc-uri-endpoints_{context}[JWKS endpoint] which the IDP can use for downloading the keys automatically. \ No newline at end of file +If you want to use https://datatracker.ietf.org/doc/html/rfc7516[Json Web Encryption (JWE)] ID Tokens or UserInfo responses in the provider, the IDP needs to know the public key to use with {project_name}. The provider uses the <> defined for the different encryption algorithms to decrypt the tokens. {project_name} provides a standard xref:con-server-oidc-uri-endpoints_{context}[JWKS endpoint] which the IDP can use for downloading the keys automatically. diff --git a/docs/documentation/server_admin/topics/identity-broker/social/gitlab.adoc b/docs/documentation/server_admin/topics/identity-broker/social/gitlab.adoc index 25374cf65c12..57bb0fe3bbc9 100644 --- a/docs/documentation/server_admin/topics/identity-broker/social/gitlab.adoc +++ b/docs/documentation/server_admin/topics/identity-broker/social/gitlab.adoc @@ -9,7 +9,7 @@ image:images/gitlab-add-identity-provider.png[Add Identity Provider] + . Copy the value of *Redirect URI* to your clipboard. -. In a separate browser tab, https://docs.gitlab.com/ee/integration/oauth_provider.html[add a new GitLab application]. +. In a separate browser tab, https://docs.gitlab.com/integration/oauth_provider/[add a new GitLab application]. .. Use the *Redirect URI* in your clipboard as the *Redirect URI*. .. Note the *Application ID* and *Secret* when you save the application. . In {project_name}, paste the value of the `Application ID` into the *Client ID* field. 
diff --git a/docs/documentation/server_admin/topics/identity-broker/social/instagram.adoc b/docs/documentation/server_admin/topics/identity-broker/social/instagram.adoc index 598efa96a69b..ab21a32cb94a 100644 --- a/docs/documentation/server_admin/topics/identity-broker/social/instagram.adoc +++ b/docs/documentation/server_admin/topics/identity-broker/social/instagram.adoc @@ -1,6 +1,9 @@ ==== Instagram +IMPORTANT: The Instagram Identity Broker is deprecated for removal. Prefer using the Facebook Identity Broker instead. +To enable it, start the server with `--features=instagram-broker`. + .Procedure . Click *Identity Providers* in the menu. . From the *Add provider* list, select *Instagram*. @@ -25,7 +28,7 @@ image:images/meta-select-app-type.png[Select an app type] .Create an app image:images/meta-create-app.png[Create an app] + -.. Fill in all required fields. +.. Fill in all required fields. .. Click *Create app*. Meta then brings you to the dashboard. .. In the navigation panel, select *App settings* - *Basic*. .. Select *+ Add Platform* at the bottom of the page. diff --git a/docs/documentation/server_admin/topics/identity-broker/social/openshift.adoc b/docs/documentation/server_admin/topics/identity-broker/social/openshift.adoc index 1fc37c0bd2ab..37114423f737 100644 --- a/docs/documentation/server_admin/topics/identity-broker/social/openshift.adoc +++ b/docs/documentation/server_admin/topics/identity-broker/social/openshift.adoc @@ -5,7 +5,19 @@ . A {project_name} server configured in order to use the truststore. For more information, see the https://www.keycloak.org/server/keycloak-truststore[Configuring a Truststore] {section}. .Procedure -. Click *Identity Providers* in the menu. +. Locate the Openshift 4 instance's API URL by using this command: ++ +[source,bash,subs=+attributes] +---- +oc cluster-info +---- +. Look for the URL in a line that has this format: ++ +[source,bash,subs=+attributes] +---- +Kubernetes master is running at https://api.:6443 +---- +. In the Admin Console, click *Identity Providers* in the menu. . From the *Add provider* list, select *Openshift v4*. . Enter the *Client ID* and *Client Secret* and in the *Base URL* field, enter the API URL of your OpenShift 4 instance. Additionally, you can copy the *Redirect URI* to your clipboard. + diff --git a/docs/documentation/server_admin/topics/login-settings/forgot-password.adoc b/docs/documentation/server_admin/topics/login-settings/forgot-password.adoc index c41bc808b783..5b37e6e9feca 100644 --- a/docs/documentation/server_admin/topics/login-settings/forgot-password.adoc +++ b/docs/documentation/server_admin/topics/login-settings/forgot-password.adoc @@ -27,7 +27,7 @@ image:images/forgot-password-page.png[Forgot Password Page] The text sent in the email is configurable. See link:{developerguide_link}[{developerguide_name}] for more information. -When users click the email link, {project_name} asks them to update their password, and if they have set up an OTP generator, {project_name} asks them to reconfigure the OTP generator. Depending on security requirements of your organization, you may not want users to reset their OTP generator through email. +When users click the email link, {project_name} asks them to update their password, and if they have set up an OTP generator, {project_name} asks them to reconfigure the OTP generator. 
For security reasons, the flow forces federated users to login again after the reset credentials and keeps internal database users logged in if the same authentication session (same browser) is used. Depending on the security requirements of your organization, you can change the default behavior. To change this behavior, perform these steps: @@ -40,6 +40,11 @@ To change this behavior, perform these steps: image:images/reset-credentials-flow.png[Reset Credentials Flow] + If you do not want to reset the OTP, set the `Reset - Conditional OTP` sub-flow requirement to *Disabled*. ++ +.Send Reset Email Configuration +image:images/reset-credential-email-config.png[Send Reset Email Configuration] ++ +If you want to change default behavior for the force login option, click the *Send Reset Email* settings icon in the flow, define an *Alias*, and select the best *Force login after reset* option for you (`true`, always force re-authentication, `false`, keep the user logged in if the same browser was used, `only-federated`, default value that forces login again only for federated users). . Click *Authentication* in the menu. . Click the *Required actions* tab. . Ensure *Update Password* is enabled. diff --git a/docs/documentation/server_admin/topics/oid4vci/vc-issuer-configuration.adoc b/docs/documentation/server_admin/topics/oid4vci/vc-issuer-configuration.adoc new file mode 100644 index 000000000000..de285d885ea4 --- /dev/null +++ b/docs/documentation/server_admin/topics/oid4vci/vc-issuer-configuration.adoc @@ -0,0 +1,474 @@ +[[_oid4vci]] +== Configuring {project_name} as a Verifiable Credential Issuer + +[IMPORTANT] +==== +This is an experimental feature and should not be used in production. Backward compatibility is not guaranteed, and future updates may introduce breaking changes. +==== + +{project_name} provides experimental support for https://openid.net/specs/openid-4-verifiable-credential-issuance-1_0.html[OpenID for Verifiable Credential Issuance]. + +=== Introduction + +This chapter provides step-by-step instructions for configuring {project_name} as a Verifiable Credential Issuer using the OpenID for Verifiable Credential Issuance (OID4VCI) protocol. It outlines the process for setting up a {project_name} instance to securely issue and manage Verifiable Credentials (VCs), supporting decentralized identity solutions. + +=== What are Verifiable Credentials (VCs)? + +Verifiable Credentials (VCs) are cryptographically signed, tamper-evident data structures that represent claims about an entity, such as a person, organization, or device. They are foundational to decentralized identity systems, allowing secure and privacy-preserving identity verification without reliance on centralized authorities. VCs support advanced features like selective disclosure and zero-knowledge proofs, enhancing user privacy and security. + +=== What is OID4VCI? + +OpenID for Verifiable Credential Issuance (OID4VCI) is an extension of the OpenID Connect (OIDC) protocol. It defines a standardized, interoperable framework for credential issuers to deliver VCs to holders, who can then present them to verifiers. OID4VCI leverages {project_name}'s existing authentication and authorization capabilities to streamline VC issuance. + +=== Scope of This Chapter + +This chapter covers the following technical configurations: + +- Creating a dedicated realm for VC issuance. +- Setting up a test user for credential testing. +- Configuring custom cryptographic keys for signing and encrypting VCs. 
+- Defining realm attributes to specify VC metadata. +- Establishing client scopes and mappers to include user attributes in VCs. +- Registering a client to handle VC requests. +- Verifying the configuration using the issuer metadata endpoint. + +=== Prerequisites + +Ensure the following requirements are met before configuring {project_name} as a Verifiable Credential Issuer: + +=== {project_name} Instance + +A running {project_name} server with the OID4VCI feature enabled. + +To enable the feature, add the following flag to the startup command: + +[source,bash] +---- +--features=oid4vc-vci +---- + +Verify activation by checking the server logs for the `OID4VC_VCI` initialization message. + +=== Configuring Credential Issuance in Keycloak + +In {project_name}, Verifiable Credentials are managed through *ClientScopes*, with each ClientScope representing a single Verifiable Credential type. To enable the issuance of a credential, the corresponding ClientScope must be assigned to an OpenID Connect client - ideally as *optional*. + +During the OAuth2 authorization process, the credential-specific scope can be requested by including the ClientScope's name in the `scope` parameter of the authorization request. Once the user has successfully authenticated, the resulting Access Token *MUST* include the requested ClientScope in its `scope` claim. To ensure this, make sure the ClientScope option *Include in token scope* is enabled. + +With this Access Token, the Verifiable Credential can be issued at the Credential Endpoint. + +=== Authentication + +An access token is required to authenticate API requests. + +Refer to the following {project_name} documentation sections for detailed steps on: + +- <> +- <<_oidc-auth-flows-direct, Obtaining an Access Token>> + +=== Configuration Steps + +Follow these steps to configure {project_name} as a Verifiable Credential Issuer. Each section is detailed with procedures, explanations, and examples where applicable. + +=== Creating a Realm + +A realm in {project_name} is a logical container that manages users, clients, roles, and authentication flows. +For Verifiable Credential (VC) issuance, create a dedicated realm to ensure isolation and maintain a clear separation of functionality. + +[NOTE] +==== +For detailed instructions on creating a realm, refer to the {project_name} documentation: +<>. +==== + +=== Creating a User Account + +A test user is required to simulate credential issuance and verify the setup. + +[NOTE] +==== +For step-by-step instructions on creating a user, refer to the {project_name} documentation: +<>. +==== + +Ensure that the user has a valid username, email, and password. If the password should not be reset upon first login, disable the "Temporary" toggle during password configuration. + +=== Key Management Configuration + +{project_name} uses cryptographic keys for signing and encrypting Verifiable Credentials (VCs). To ensure secure and standards-compliant issuance, configure **ECDSA (ES256) for signing**, **RSA (RS256) for signing**, and **RSA-OAEP for encryption** using a keystore. + +[NOTE] +==== +For a detailed guide on configuring realm keys, refer to the {project_name} documentation: +<>. +==== + +==== Configuring Key Providers + +To enable cryptographic operations for VC issuance: + +- **ECDSA (ES256) Key**: Used for signing VCs with the ES256 algorithm. +- **RSA (RS256) Key**: Alternative signing mechanism using RS256. +- **RSA-OAEP Key**: Used for encrypting sensitive data in VCs. 
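+
+For example, a keystore holding the ECDSA (ES256) key pair can be generated with `keytool`. This is a minimal sketch; the alias, keystore file name, passwords, and distinguished name are placeholders, and analogous commands with `-keyalg RSA` can produce the RS256 and RSA-OAEP keys:
+
+[source,bash]
+----
+# Generate an EC P-256 key pair in a PKCS12 keystore (placeholder names and passwords).
+keytool -genkeypair \
+  -alias vc-es256-key \
+  -keyalg EC -groupname secp256r1 \
+  -validity 365 \
+  -dname "CN=oid4vc-vci-issuer" \
+  -keystore vc-keys.p12 -storetype PKCS12 \
+  -storepass changeit -keypass changeit
+----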
+ +Each key must be registered as a **java-keystore provider** within the **Realm Settings** > **Keys** section, ensuring: +- The keystore file is correctly specified and securely stored. +- The appropriate algorithm (ES256, RS256, or RSA-OAEP) is selected. +- The key is active, enabled, and configured with the correct usage (signing or encryption). +- Priority values are set to define precedence among keys. + +[WARNING] +==== +Ensure the keystore file is **securely stored** and accessible to the {project_name} server. Use **strong passwords** to protect both the keystore and the private keys. +==== + +=== Registering Realm Attributes + +Realm attributes define metadata for Verifiable Credentials (VCs), such as **expiration times, supported formats, and scope definitions**. These attributes allow {project_name} to issue VCs with predefined settings. + +Since the **{project_name} Admin Console does not support direct attribute creation**, use the **{project_name} Admin REST API** to configure these attributes. + +==== Define Realm Attributes + +Create a JSON file (e.g., `realm-attributes.json`) with the following content: + +[source,json] +---- +{ + "realm": "oid4vc-vci", + "enabled": true, + "attributes": { + "preAuthorizedCodeLifespanS": 120 + } +} +---- + +==== Attribute Breakdown + +The attributes section contains issuer-specific metadata: +- **preAuthorizedCodeLifespanS** – Defines how long pre-authorized codes remain valid (in seconds). + +==== Import Realm Attributes + +Use the following `curl` command to import the attributes into {project_name}: + +[source,bash] +---- +curl -X PUT "https://localhost:8443/admin/realms/oid4vc-vci" \ + -H "Authorization: Bearer $ACCESS_TOKEN" \ + -H "Content-Type: application/json" \ + -d @realm-attributes.json +---- + +[NOTE] +==== +- Replace `$ACCESS_TOKEN` with a valid **{project_name} Admin API access token**. +- **Avoid using `-k` in production**; instead, configure a **trusted TLS certificate**. +==== + +=== Create Client Scopes with Mappers + +Client scopes define **which user attributes** are included in Verifiable Credentials (VCs). Therefore, they are considered the Verifiable Credential configuration itself. These scopes use **protocol mappers** to map specific claims into VCs and the protocol mappers will also contain the corresponding metadata for claims that is displayed at the Credential Issuer Metadata Endpoint. + +You can create the ClientScopes using the {project_name} web Administration Console, but the web Administration Console does not yet support adding metadata configuration. For metadata configuration, you will need to use the Admin REST API. 
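+
+The `curl` examples in this chapter assume that `$ACCESS_TOKEN` holds an admin access token. As a minimal sketch, assuming an `admin` user in the `master` realm, the built-in `admin-cli` client, and `jq` being available, such a token can be obtained as follows:
+
+[source,bash]
+----
+# Obtain an admin access token for the Admin REST API (credentials are placeholders).
+ACCESS_TOKEN=$(curl -s -X POST "https://localhost:8443/realms/master/protocol/openid-connect/token" \
+  -d "client_id=admin-cli" \
+  -d "username=admin" \
+  -d "password=admin" \
+  -d "grant_type=password" | jq -r '.access_token')
+----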
+ +==== Define a Client Scope with a Mapper + +Create a JSON file (e.g., `client-scopes.json`) with the following content: + +[source,json] +---- +{ + "name": "vc-scope-mapping", + "protocol": "oid4vc", + "attributes": { + "include.in.token.scope": "true", + "vc.issuer_did": "did:web:vc.example.com", + "vc.credential_configuration_id": "my-credential-configuration-id", + "vc.credential_identifier": "my-credential-identifier", + "vc.format": "jwt_vc", + "vc.expiry_in_seconds": 31536000, + "vc.verifiable_credential_type": "my-vct", + "vc.supported_credential_types": "credential-type-1,credential-type-2", + "vc.credential_contexts": "context-1,context-2", + "vc.proof_signing_alg_values_supported": "ES256", + "vc.cryptographic_binding_methods_supported": "jwk", + "vc.signing_key_id": "key-id-123456", + "vc.display": "[{\"name\": \"IdentityCredential\", \"logo\": {\"uri\": \"https://university.example.edu/public/logo.png\", \"alt_text\": \"a square logo of a university\"}, \"locale\": \"en-US\", \"background_color\": \"#12107c\", \"text_color\": \"#FFFFFF\"}]", + "vc.sd_jwt.number_of_decoys": "2", + "vc.credential_build_config.sd_jwt.visible_claims": "iat,jti,nbf,exp,given_name", + "vc.credential_build_config.hash_algorithm": "SHA-256", + "vc.credential_build_config.token_jws_type": "JWS", + "vc.include_in_metadata": "true" + }, + "protocolMappers": [ + { + "name": "academic_title-mapper-bsk", + "protocol": "oid4vc", + "protocolMapper": "oid4vc-static-claim-mapper", + "config": { + "claim.name": "academic_title", + "staticValue": "N/A" + } + }, + { + "name": "givenName", + "protocol": "oid4vc", + "protocolMapper": "oid4vc-user-attribute-mapper", + "config": { + "claim.name": "given_name", + "userAttribute": "firstName", + "vc.mandatory": "false", + "vc.display": "[{\"name\": \"الاسم الشخصي\", \"locale\": \"ar-SA\"}, {\"name\": \"Vorname\", \"locale\": \"de-DE\"}, {\"name\": \"Given Name\", \"locale\": \"en-US\"}, {\"name\": \"Nombre\", \"locale\": \"es-ES\"}, {\"name\": \"نام\", \"locale\": \"fa-IR\"}, {\"name\": \"Etunimi\", \"locale\": \"fi-FI\"}, {\"name\": \"Prénom\", \"locale\": \"fr-FR\"}, {\"name\": \"पहचानी गई नाम\", \"locale\": \"hi-IN\"}, {\"name\": \"Nome\", \"locale\": \"it-IT\"}, {\"name\": \"名\", \"locale\": \"ja-JP\"}, {\"name\": \"Овог нэр\", \"locale\": \"mn-MN\"}, {\"name\": \"Voornaam\", \"locale\": \"nl-NL\"}, {\"name\": \"Nome Próprio\", \"locale\": \"pt-PT\"}, {\"name\": \"Förnamn\", \"locale\": \"sv-SE\"}, {\"name\": \"مسلمان نام\", \"locale\": \"ur-PK\"}]" + } + } + ] +} +---- + +[NOTE] +==== +This is a **sample configuration**. +You can define **additional protocol mappers** to support different claim mappings, such as: + +- Dynamic attribute values instead of static ones. +- Mapping multiple attributes per credential type. +- Alternative supported credential types. +==== + +From the example above: + +- It is important to set `include.in.token.scope=true`, see <>. +- Most of the named attributes above are optional. See below: <>. +- You can determine the appropriate `protocolMapper` names by first creating them through the Web Administration Console and then retrieving their definitions via the Admin REST API. + +==== Attribute Breakdown - ClientScope [[client-scope-attribute-breakdown]] + +[cols="1,1,2", options="header"] +|=== +| Property +| Required +| Description / Default + +| `name` +| required +| Name of the client scope. + +| `protocol` +| required +| Protocol used by the client scope. 
Use `oid4vc` for OpenID for Verifiable Credential Issuance, which is an OAuth2 extension (like `openid-connect`). + +| `include.in.token.scope` +| required +| [[include.in.token.scope]] This value MUST be `true`. It ensures that the scope’s name is included in the `scope` claim of the issued Access Token. + +| `protocolMappers` +| optional +| Defines how claims are mapped into the credential and how metadata is exposed via the issuer’s metadata endpoint. + +| `vc.issuer_did` +| optional +| The Decentralized Identifier (DID) of the issuer. + +_Default_: `$\{name}` + +| `vc.credential_configuration_id` +| optional +| The credentials configuration ID. + +_Default_: `$\{name}+` + +| `vc.credential_identifier` +| optional +| The credentials identifier. + +_Default_: `$\{name}+` + +| `vc.format` +| optional +| Defines the VC format (e.g., `jwt_vc`). + +_Default_: `vc+sd-jwt` + +| `vc.verifiable_credential_type` +| optional +| The Verifiable Credential Type (VCT). + +_Default_: `$\{name}+` + +| `vc.supported_credential_types` +| optional +| The type values of the Verifiable Credential Type. + +_Default_: `$\{name}+` + +| `vc.credential_contexts` +| optional +| The context values of the Verifiable Credential Type. + +_Default_: `$\{name}+` + +| `vc.proof_signing_alg_values_supported` +| optional +| Supported signature algorithms for this credential. + +_Default_: All present keys supporting JWS algorithms in the realm. + +| `vc.cryptographic_binding_methods_supported` +| optional +| Supported cryptographic methods (if applicable). + +_Default_: `jwk` + +| `vc.signing_key_id` +| optional +| The ID of the key to sign this credential. + +_Default_: _none_ + +| `vc.display` +| optional +| Display information shown in the user's wallet about the issued credential. + +_Default_: _none_ + +| `vc.sd_jwt.number_of_decoys` +| optional +| Used only with format `vc+sd-jwt`. Number of decoy hashes in the SD-JWT. + +_Default_: `10` + +| `vc.credential_build_config.sd_jwt.visible_claims` +| optional +| Used only with format `vc+sd-jwt`. Claims always disclosed in the SD-JWT body. + +_Default_: `id,iat,nbf,exp,jti` + +| `vc.credential_build_config.hash_algorithm` +| optional +| Hash algorithm used before signing the credential. + +_Default_: `SHA-256` + +| `vc.credential_build_config.token_jws_type` +| optional +| JWT type written into the `typ` header of the token. + +_Default_: `JWS` + +| `vc.expiry_in_s` +| optional +| Credential expiration time in seconds. + +_Default_: `31536000` (one year) + +| `vc.include_in_metadata` +| optional +| If this claim should be listed in the credentials metadata. + +_Default_: `true` but depends on the mapper-type. Claims like `jti`, `nbf`, `exp`, etc. are set to `false` by default. +|=== + +==== Attribute Breakdown - ProtocolMappers + +- **name** – Mapper identifier. +- **protocol** – Must be `oid4vc` for Verifiable Credentials. +- **protocolMapper** – Specifies the claim mapping strategy (e.g., `oid4vc-static-claim-mapper`). +- **config**: contains the protocol-mappers specific attributes. + +Most claims are dependent on the `protocolMapper`-value, but there are also commonly used claims available for all ProtocolMappers: + +[cols="1,1,2", options="header"] +|=== +| Property +| Required +| Description / Default + +| `claim.name` +| required +| The name of the attribute that will be added into the Verifiable Credential. 
+ +_Default_: _none_ + +| `userAttribute` +| required +| The name of the users-attribute that will be used to map the value into the `claim.name` of the Verifiable Credential. + +_Default_: _none_ + +| `vc.mandatory` +| optional +| If the credential must be issued with this claim. + +_Default_: `false` + +| `vc.display` +| optional +| Metadata information that is displayed at the credential-issuer metadata-endpoint. + +_Default_: _none_ +|=== + +==== Import the Client Scope + +Use the following `curl` command to import the client scope into {project_name}: + +[source,bash] +---- +curl -X POST "https://localhost:8443/admin/realms/oid4vc-vci/client-scopes" \ + -H "Authorization: Bearer $ACCESS_TOKEN" \ + -H "Content-Type: application/json" \ + -d @client-scopes.json +---- + +[NOTE] +==== +- Replace `$ACCESS_TOKEN` with a valid **{project_name} Admin API access token**. +- **Avoid using `-k` in production**; instead, configure a **trusted TLS certificate**. +- If updating an existing scope, use `PUT` instead of `POST`. +==== + +=== Create the Client + +Set up a client to handle Verifiable Credential (VC) requests and assign the necessary scopes. +The client does not differ from regular OpenID Connect clients — with one exception: it must have the appropriate **optional ClientScopes** assigned that define the Verifiable Credentials it is allowed to issue. + +. Create a JSON file (e.g., `oid4vc-rest-api-client.json`) with the following content: ++ +[source,json] +---- +{ + "clientId": "oid4vc-rest-api", + "enabled": true, + "protocol": "openid-connect", + "publicClient": false, + "serviceAccountsEnabled": true, + "clientAuthenticatorType": "client-secret", + "redirectUris": ["http://localhost:8080/*"], + "directAccessGrantsEnabled": true, + "defaultClientScopes": ["profile"], + "optionalClientScopes": ["vc-scope-mapping"], + "attributes": { + "client.secret.creation.time": "1719785014", + "client.introspection.response.allow.jwt.claim.enabled": "false", + "login_theme": "keycloak", + "post.logout.redirect.uris": "http://localhost:8080" + } +} +---- ++ +- **clientId**: Unique identifier for the client. +- **optionalClientScopes**: Links the `vc-scope-mapping` scope for VC requests. + +. Import the client using the following `curl` command: ++ +[source,bash] +---- +curl -k -X POST "https://localhost:8443/admin/realms/oid4vc-vci/clients" \ + -H "Authorization: Bearer $ACCESS_TOKEN" \ + -H "Content-Type: application/json" \ + -d @oid4vc-rest-api-client.json +---- + +=== Verify the Configuration + +Validate the setup by accessing the **issuer metadata endpoint**: + +. Open a browser or use a tool like `curl` to visit: ++ +[source,bash] +---- +https://localhost:8443/realms/oid4vc-vci/.well-known/openid-credential-issuer +---- + +A successful response returns a JSON object containing details such as: +- **Supported claims** +- **Credential formats** +- **Issuer metadata** + +=== Conclusion + +You have successfully configured **{project_name} as a Verifiable Credential Issuer** using the **OID4VCI protocol**. +This setup leverages {project_name}'s robust **identity management capabilities** to issue secure, **standards-compliant VCs**. + +For a **complete reference implementation**, see the sample project: +https://github.com/adorsys/{project_name}-ssi-deployment/tree/main[{project_name} SSI Deployment^]. 
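+
+As a final command-line check of the verification step above, you can fetch the issuer metadata directly. The following is a minimal sketch; the optional `jq` call only pretty-prints the response, and the server's TLS certificate is assumed to be trusted as recommended earlier:
+
+[source,bash]
+----
+curl -s "https://localhost:8443/realms/oid4vc-vci/.well-known/openid-credential-issuer" | jq .
+----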
diff --git a/docs/documentation/server_admin/topics/organizations/authenticating-members.adoc b/docs/documentation/server_admin/topics/organizations/authenticating-members.adoc
index 7a273e26d71d..80caa3edaaf6 100644
--- a/docs/documentation/server_admin/topics/organizations/authenticating-members.adoc
+++ b/docs/documentation/server_admin/topics/organizations/authenticating-members.adoc
@@ -106,6 +106,18 @@ Change the *first broker login* flow by following these steps:
 .Organizations first broker flow
 image:images/organizations-first-broker-flow.png[alt="Organizations first broker flow"]
- You should now be able to authenticate using any identity provider associated with an organization and have the user joining the organization as a member as soon as they complete the first browser login flow.
+
+== Configuring how users authenticate
+
+If the flow supports organizations, you can configure some of the steps to change how users authenticate to the realm.
+
+For example, some use cases will require users to authenticate to a realm only if they are a member of any or a specific organization in the realm.
+
+To enable this behavior, you need to enable the `Requires user membership` setting on the `Organization Identity-First Login` execution step by clicking on its settings.
+
+If enabled, once the user provides the username or email in the identity-first login page, the server will
+try to resolve an organization where the user is a member by looking at any existing membership or based on the semantics of the <<_mapping_organization_claims_,organization>> scope,
+if requested by the client. If the user is not a member of an organization, an error page is shown.
+
diff --git a/docs/documentation/server_admin/topics/organizations/managing-identity-providers.adoc b/docs/documentation/server_admin/topics/organizations/managing-identity-providers.adoc
index f03caddf8441..103417681284 100644
--- a/docs/documentation/server_admin/topics/organizations/managing-identity-providers.adoc
+++ b/docs/documentation/server_admin/topics/organizations/managing-identity-providers.adoc
@@ -43,14 +43,16 @@ Hide on login page::
 If this identity provider should be hidden in login pages when the user is authenticating in the scope of the organization.
 Redirect when email domain matches::
-If members should be automatically redirected to the identity provider when their email domain matches the domain set to the identity provider.
+If members should be automatically redirected to the identity provider when their email domain matches the domain set to the identity provider. If the domain is set to `Any`, members whose email domain matches *any* of the organization domains will be redirected to the identity provider.
+
+If the organization is linked with multiple identity providers, the organization authenticator prioritizes the provider that matches the email domain of the user for automatic redirection. If none is found, it tries to locate one whose domain is set to `Any`.
 Once linked to an organization, the identity provider can be managed just like any other in a realm by accessing the *Identity Providers* section in the menu. However, the options herein described are only available when managing the identity provider in the scope of an organization. The only exception is the *Hide on login page* option that is present here for convenience.
 == Editing a linked identity provider
-You can edit any of the organization-related settings of a linked identity provider at any time.
+You can edit any of the organization-related settings of a linked identity provider at any time. .Procedure diff --git a/docs/documentation/server_admin/topics/organizations/managing-members.adoc b/docs/documentation/server_admin/topics/organizations/managing-members.adoc index 3b52c6b08354..8fbfea10833a 100644 --- a/docs/documentation/server_admin/topics/organizations/managing-members.adoc +++ b/docs/documentation/server_admin/topics/organizations/managing-members.adoc @@ -121,3 +121,9 @@ When removing a member from an organization, remember that the user may or may n that user is managed or unmanaged member, respectively. For more details, see <<_managed_unmanaged_members_,Managed and unmanaged members>>. + +== Support for federated members + +Users coming from federated providers can also be added as members of an organization. The only exceptions are the users from LDAP providers with *import mode disabled*. Organization members are added to an internal group that is not synchronized with external providers, so even if the LDAP provider has a group mapper with mode LDAP_ONLY it won't be possible for the non-imported users to be added as members of an organization because that membership won't be synced with the LDAP server. + +In other words, LDAP users that are not imported can't join an organization because the membership is not stored in the local DB nor in the LDAP server. So if you want to have LDAP users joining organizations, ensure that the import mode of the LDAP provider is enabled. diff --git a/docs/documentation/server_admin/topics/organizations/mapping-organization-claims.adoc b/docs/documentation/server_admin/topics/organizations/mapping-organization-claims.adoc index 4dec2ba77c88..0e4f9fa0fe18 100644 --- a/docs/documentation/server_admin/topics/organizations/mapping-organization-claims.adoc +++ b/docs/documentation/server_admin/topics/organizations/mapping-organization-claims.adoc @@ -1,5 +1,6 @@ [id="mapping-organization-claims_{context}"] +[[_mapping_organization_claims_]] = Mapping organization claims [role="_abstract"] To map organization-specific claims into tokens, a client needs to request the *organization* scope when sending diff --git a/docs/documentation/server_admin/topics/realms/email.adoc b/docs/documentation/server_admin/topics/realms/email.adoc index aa94bac7186b..43372b78ebfb 100644 --- a/docs/documentation/server_admin/topics/realms/email.adoc +++ b/docs/documentation/server_admin/topics/realms/email.adoc @@ -41,4 +41,93 @@ Encryption:: Tick one of these checkboxes to support sending emails for recovering usernames and passwords, especially if the SMTP server is on an external network. You will most likely need to change the *Port* to 465, the default port for SSL/TLS. Authentication:: - Set this switch to *ON* if your SMTP server requires authentication. When prompted, supply the *Username* and *Password*. The value of the *Password* field can refer a value from an external <<_vault-administration,vault>>. + Set this switch to *ON* if your SMTP server requires authentication. + +Username:: + All authentication-mechanisms require a username. + +Authentication Type:: + Choose the kind of authentication: 'password' or 'token'. + +Password:: + Only needed when *Authentication Type* 'password' is selected. + Supply the *Password*. The value of the *Password* field can refer a value from an external <<_vault-administration,vault>>. + +Auth Token URL:: + Only needed when *Authentication Type* 'token' is selected. 
+ Supply the *Auth Token URL* that is used to fetch a token via client credentials grant. + +Auth Token Scope:: + Only needed when *Authentication Type* 'token' is selected. + Supply the *Auth Token Scope* that is used to fetch a token from the *Auth Token URL*. + +Auth Token ClientId:: + Only needed when *Authentication Type* 'token' is selected. + Supply the *Auth ClientId* that is used to fetch a token from the *Auth Token URL*. + +Auth Token Client Secret:: + Only needed when *Authentication Type* 'token' is selected. + Supply the *Auth Client Secret* that authenticates the client to fetch a token from the *Auth Token URL*. The value of the *Auth Client Secret* field can refer a value from an external <<_vault-administration,vault>>. + +ifeval::[{project_community}==true] + +== XOAUTH2 email configuration with third-party vendors + +The following section contains some hints on how to configure {project_name} email settings to use XOAUTH2 based authentication with some known third-party software SMTP servers. + +NOTE: This section has been contributed by the Keycloak community. As the Keycloak core team does not have means to test third-party providers, it is provided as-is. If you find this documentation outdated or incomplete, please contribute to improve it. + +=== Configuration for Microsoft Azure and Office365 + +Microsoft Azure allows 'Client Credentials Grant' using a client secret to gather an access token. +Microsoft Office365 supports SMTP with XOAUTH2 to authenticate with the gathered token. + +Links to relevant Microsoft documentation: + +- https://learn.microsoft.com/en-us/exchange/permissions-exo/application-rbac[Usage of role base access control for applications in exchange online] +- Settings in https://learn.microsoft.com/en-us/exchange/client-developer/legacy-protocols/how-to-authenticate-an-imap-pop-smtp-application-by-using-oauth[Authenticate an IMAP, POP or SMTP connection using OAuth] + +The following method for setting up {project_name} to send email with Azure and Office365 has been verified by a test. +There might be other variants to achieve the same depending on your environment. + +From:: +`@` + +Host:: +`smtp.office365.com` + +Port:: +`587` + +Encryption:: +Check Start TLS + +Username:: +`@` (might be the same of a different value than the sender value) + +Auth Token Url:: +`+https://login.microsoftonline.com//oauth2/v2.0/token+` ++ +Replace TenantID with the id of your Microsoft tenant, usually a UUID, in Azure or just copy the token url from the list of endpoints displayed in the Azure Console. + +Auth Token Scope:: +`+https://outlook.office.com/.default+` + +Auth Token ClientId:: +`` ++ +Replace ApplicationId with the id of your application in Azure, usually a UUID. + +Auth Token ClientSecret:: +`` + +=== Configuration for Google Mail + +This feature is not yet supported by {project_name}, because Google does not allow client-secrets for the Client Credentials Grant. + +=== Configuration for AWS + +XOAUTH2 is not supported by the AWS-SMTP service. +The AWS-service requires the use of a password. + +endif::[] diff --git a/docs/documentation/server_admin/topics/realms/proc-creating-a-realm.adoc b/docs/documentation/server_admin/topics/realms/proc-creating-a-realm.adoc index 633e4a2c59d6..07fc870acd4d 100644 --- a/docs/documentation/server_admin/topics/realms/proc-creating-a-realm.adoc +++ b/docs/documentation/server_admin/topics/realms/proc-creating-a-realm.adoc @@ -11,13 +11,8 @@ realm and only be able to interact with customer-facing apps. .Procedure -. 
Click *{project_name}* next to *master realm*, then click *Create Realm*. -+ -.Add realm menu -image:images/add-realm-menu.png[Add realm menu] - +. In the Admin Console, click *Create Realm* next to *Current realm*. . Enter a name for the realm. - . Click *Create*. + .Create realm diff --git a/docs/documentation/server_admin/topics/realms/proc-using-admin-console.adoc b/docs/documentation/server_admin/topics/realms/proc-using-admin-console.adoc index 0c4199c52b62..c2a917f9e040 100644 --- a/docs/documentation/server_admin/topics/realms/proc-using-admin-console.adoc +++ b/docs/documentation/server_admin/topics/realms/proc-using-admin-console.adoc @@ -4,30 +4,44 @@ You configure realms and perform most administrative tasks in the {project_name} .Prerequisites -* You need an administrator account. See xref:creating-first-admin_{context}[Creating the first administrator]. +To use the Admin Console, you need an administrator account. + +* If no administrators exist, see xref:creating-first-admin_{context}[Creating the first administrator]. +* If other administrators exist, ask an administrator to provide an account with privileges to manage realms. .Procedure . Go to the URL for the Admin Console. + For example, for localhost, use this URL: http://localhost:8080{kc_admins_path}/ + +. Enter the username and password you created on the Welcome Page or through environment variables as described in https://www.keycloak.org/server/configuration#_creating_the_initial_admin_user[Creating the initial admin user]. + .Login page image:images/login-page.png[Login page] - -. Enter the username and password you created on the Welcome Page or through environment variables as per https://www.keycloak.org/server/configuration#_creating_the_initial_admin_user[Creating the initial admin user] guide. ++ This action displays the Admin Console. + .Admin Console image:images/admin-console.png[Admin Console] . Note the menus and other options that you can use: -+ -* Click the menu labeled *Master* to pick a realm you want to manage or to create a new one. -+ + +* Click the *Current realm* to see if other realms are available to be managed. + +* Click *Create realm* to create another realm that you can manage. + * Click the top right list to view your account or log out. + +. Click *Realm settings* in the menu to see the fields and options for this realm. ++ +Click a question mark *?* icon to show the definition of a field such as *Frontend URL*. + + -* Hover over a question mark *?* icon to show a tooltip text that describes that field. The image above shows the tooltip in action. -* Click a question mark *?* icon to show a tooltip text that describes that field. The image above shows the tooltip in action. +.Realm settings +image:images/realm-settings.png[Realm settings] -NOTE: Export files from the Admin Console are not suitable for backups or data transfer between servers. Only boot-time exports are suitable for backups or data transfer between servers. +[NOTE] +==== +Export files from the Admin Console are not suitable for backups or data transfer between servers. Only boot-time exports are suitable for backups or data transfer between servers. 
+==== \ No newline at end of file diff --git a/docs/documentation/server_admin/topics/realms/themes.adoc b/docs/documentation/server_admin/topics/realms/themes.adoc index 44a66b170e38..18807dbd626b 100644 --- a/docs/documentation/server_admin/topics/realms/themes.adoc +++ b/docs/documentation/server_admin/topics/realms/themes.adoc @@ -5,7 +5,7 @@ For a given realm, you can change the appearance of any UI in {project_name} by .Procedure -. Click *Realm setting* in the menu. +. Click *Realm settings* in the menu. . Click the *Themes* tab. + .Themes tab diff --git a/docs/documentation/server_admin/topics/roles-groups/con-role-scope-mappings.adoc b/docs/documentation/server_admin/topics/roles-groups/con-role-scope-mappings.adoc index 25be92fb3cb1..6bdb27602b58 100644 --- a/docs/documentation/server_admin/topics/roles-groups/con-role-scope-mappings.adoc +++ b/docs/documentation/server_admin/topics/roles-groups/con-role-scope-mappings.adoc @@ -6,7 +6,7 @@ [role="_abstract"] On creation of an OIDC access token or SAML assertion, the user role mappings become claims within the token or assertion. Applications use these claims to make access decisions on the resources controlled by the application. {project_name} digitally signs access tokens and applications reuse them to invoke remotely secured REST services. However, these tokens have an associated risk. An attacker can obtain these tokens and use their permissions to compromise your networks. To prevent this situation, use _Role Scope Mappings_. -_Role Scope Mappings_ limit the roles declared inside an access token. When a client requests a user authentication, the access token they receive contains only the role mappings that are explicitly specified for the client's scope. The result is that you limit the permissions of each individual access token instead of giving the client access to all the users permissions. +_Role Scope Mappings_ limit the roles declared inside an access token. When a client requests user authentication, the access token it receives contains only the role mappings that are explicitly specified for the client's scope. The result is that the permissions of each individual access token are limited instead of giving the client access to all the user's permissions. By default, each client gets all the role mappings of the user. You can view the role mappings for a client. @@ -26,3 +26,5 @@ You can also use <<_client_scopes, client scopes>> to define the same role scope .Partial scope image:images/client-scope.png[Partial scope] + +See the <<_oidc_token_role_mappings, Token Role mappings section>> for details about the algorithm that adds the roles to the token. diff --git a/docs/documentation/server_admin/topics/roles-groups/proc-creating-realm-roles.adoc b/docs/documentation/server_admin/topics/roles-groups/proc-creating-realm-roles.adoc index 2834a017a092..9a73621e6b0c 100644 --- a/docs/documentation/server_admin/topics/roles-groups/proc-creating-realm-roles.adoc +++ b/docs/documentation/server_admin/topics/roles-groups/proc-creating-realm-roles.adoc @@ -12,7 +12,4 @@ image:images/roles.png[] . Enter a *Description*. . Click *Save*. -.Add role -image:images/role.png[Add role] - The *description* field can be localized by specifying a substitution variable with `$\{var-name}` strings. The localized value is configured to your theme within the themes property files. See the link:{developerguide_link}[{developerguide_name}] for more details. 
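+
+If you prefer to script this setup, realm roles can also be created with the Admin CLI. The following is a minimal sketch; the server URL, credentials, realm, role name, and description are placeholder values:
+
+[source,bash]
+----
+bin/kcadm.sh config credentials --server http://localhost:8080 --realm master --user admin
+bin/kcadm.sh create roles -r myrealm -s name=myrole -s 'description=My example role'
+----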
diff --git a/docs/documentation/server_admin/topics/roles-groups/proc-managing-groups.adoc b/docs/documentation/server_admin/topics/roles-groups/proc-managing-groups.adoc index 662271fb11e3..7a5867a51be9 100644 --- a/docs/documentation/server_admin/topics/roles-groups/proc-managing-groups.adoc +++ b/docs/documentation/server_admin/topics/roles-groups/proc-managing-groups.adoc @@ -14,11 +14,11 @@ If you have a parent group and a child group, and a user that belongs only to th The hierarchy of a group is sometimes represented using the group path. The path is the complete list of names that represents the hierarchy of a specific group, from top to bottom and separated by slashes `/` (similar to files in a File System). For example a path can be `/top/level1/level2` which means that `top` is a top level group and is parent of `level1`, which in turn is parent of `level2`. This path represents unambiguously the hierarchy for the group `level2`. -Because of historical reasons {project_name}, does not escape slashes in the group name itself. Therefore a group named `level1/group` under `top` uses the path `/top/level1/group`, which is misleading. {project_name} can be started with the option `--spi-group-jpa-escape-slashes-in-group-path` to `true` and then the slashes in the name are escaped with the character `~`. The escape char marks that the slash is part of the name and has no hierarchical meaning. The previous path example would be `/top/level1~/group` when escaped. +Because of historical reasons {project_name}, does not escape slashes in the group name itself. Therefore a group named `level1/group` under `top` uses the path `/top/level1/group`, which is misleading. {project_name} can be started with the option `--spi-group--jpa--escape-slashes-in-group-path` to `true` and then the slashes in the name are escaped with the character `~`. The escape char marks that the slash is part of the name and has no hierarchical meaning. The previous path example would be `/top/level1~/group` when escaped. [source,bash] ---- -bin/kc.[sh|bat] start --spi-group-jpa-escape-slashes-in-group-path=true +bin/kc.[sh|bat] start --spi-group--jpa--escape-slashes-in-group-path=true ---- The following example includes a top-level *Sales* group and a child *North America* subgroup. diff --git a/docs/documentation/server_admin/topics/sessions.adoc b/docs/documentation/server_admin/topics/sessions.adoc index dc6c95720423..dfd332528ba0 100644 --- a/docs/documentation/server_admin/topics/sessions.adoc +++ b/docs/documentation/server_admin/topics/sessions.adoc @@ -1,4 +1,5 @@ +[[managing-user-sessions]] == Managing user sessions When users log into realms, {project_name} maintains a user session for each user and remembers each client visited by the user within the session. Realm administrators can perform multiple actions on each user session: diff --git a/docs/documentation/server_admin/topics/sessions/offline.adoc b/docs/documentation/server_admin/topics/sessions/offline.adoc index f023cbb947ac..ab47f7e24bb4 100644 --- a/docs/documentation/server_admin/topics/sessions/offline.adoc +++ b/docs/documentation/server_admin/topics/sessions/offline.adoc @@ -24,20 +24,4 @@ Clients can request an offline token by adding the parameter `scope=offline_acce {project_name} will limit its internal cache for offline user and offline client sessions to 10000 entries by default, which will reduce the overall memory usage for offline sessions. Items which are evicted from memory will be loaded on-demand from the database when needed. 
-To set different sizes for the caches, edit {project_name}'s cache config file to set a `++` for those caches. - -If you disabled feature `persistent-user-sessions`, it is possible to reduce memory requirements using a configuration option that shortens lifespan for imported offline sessions. Such sessions will be evicted from the Infinispan caches after the specified lifespan, but still available in the database. This will lower memory consumption, especially for deployments with a large number of offline sessions. - -To specify the lifespan override for offline user sessions, start {project_name} server with the following parameter: - -[source,bash] ----- ---spi-user-sessions-infinispan-offline-session-cache-entry-lifespan-override= ----- - -Similarly for offline client sessions: - -[source,bash] ----- ---spi-user-sessions-infinispan-offline-client-session-cache-entry-lifespan-override= ----- +See the server configuration guide to change this default. diff --git a/docs/documentation/server_admin/topics/sso-protocols/con-oidc-auth-flows.adoc b/docs/documentation/server_admin/topics/sso-protocols/con-oidc-auth-flows.adoc index 5b2978d4db2a..076632f810cf 100644 --- a/docs/documentation/server_admin/topics/sso-protocols/con-oidc-auth-flows.adoc +++ b/docs/documentation/server_admin/topics/sso-protocols/con-oidc-auth-flows.adoc @@ -154,7 +154,7 @@ The CIBA grant uses the following two providers. [source,bash,subs="attributes+"] ---- -kc.[sh|bat] start --spi-ciba-auth-channel-ciba-http-auth-channel-http-authentication-channel-uri=https://backend.internal.example.com{kc_base_path} +kc.[sh|bat] start --spi-ciba-auth-channel--ciba-http-auth-channel--http-authentication-channel-uri=https://backend.internal.example.com{kc_base_path} ---- The configurable items and their description follow. diff --git a/docs/documentation/server_admin/topics/sso-protocols/oidc.adoc b/docs/documentation/server_admin/topics/sso-protocols/oidc.adoc index 309860596ad8..177b1daf65ce 100644 --- a/docs/documentation/server_admin/topics/sso-protocols/oidc.adoc +++ b/docs/documentation/server_admin/topics/sso-protocols/oidc.adoc @@ -157,7 +157,7 @@ The CIBA grant uses the following two providers. [source,bash,subs="attributes+"] ---- -kc.[sh|bat] start --spi-ciba-auth-channel-ciba-http-auth-channel-http-authentication-channel-uri=https://backend.internal.example.com{kc_base_path} +kc.[sh|bat] start --spi-ciba-auth-channel--ciba-http-auth-channel--http-authentication-channel-uri=https://backend.internal.example.com{kc_base_path} ---- The configurable items and their description follow. 
diff --git a/docs/documentation/server_admin/topics/threat/auth-sessions-limit.adoc b/docs/documentation/server_admin/topics/threat/auth-sessions-limit.adoc index c76dc72143f0..a7e1d4621aac 100644 --- a/docs/documentation/server_admin/topics/threat/auth-sessions-limit.adoc +++ b/docs/documentation/server_admin/topics/threat/auth-sessions-limit.adoc @@ -25,7 +25,7 @@ The following example shows how to limit the number of active `AuthenticationSes [source,bash] ---- -bin/kc.[sh|bat] start --spi-authentication-sessions-infinispan-auth-sessions-limit=100 +bin/kc.[sh|bat] start --spi-authentication-sessions--infinispan--auth-sessions-limit=100 ---- ifeval::[{project_community}==true] @@ -33,6 +33,6 @@ The equivalent command for the new map storage: [source,bash] ---- -bin/kc.[sh|bat] start --spi-authentication-sessions-map-auth-sessions-limit=100 +bin/kc.[sh|bat] start --spi-authentication-sessions--map--auth-sessions-limit=100 ---- endif::[] diff --git a/docs/documentation/server_admin/topics/threat/read-only-attributes.adoc b/docs/documentation/server_admin/topics/threat/read-only-attributes.adoc index ef762efd2598..e6c7de36b32a 100644 --- a/docs/documentation/server_admin/topics/threat/read-only-attributes.adoc +++ b/docs/documentation/server_admin/topics/threat/read-only-attributes.adoc @@ -29,11 +29,11 @@ This is the list of the read-only attributes, which are used internally by the { System administrators have a way to add additional attributes to this list. The configuration is currently available at the server level. -You can add this configuration by using the `spi-user-profile-declarative-user-profile-read-only-attributes` and `spi-user-profile-declarative-user-profile-admin-read-only-attributes` options. For example: +You can add this configuration by using the `spi-user-profile--declarative-user-profile--read-only-attributes` and `spi-user-profile--declarative-user-profile--admin-read-only-attributes` options. For example: [source,bash,options="nowrap"] ---- -kc.[sh|bat] start --spi-user-profile-declarative-user-profile-read-only-attributes=foo,bar* +kc.[sh|bat] start --spi-user-profile--declarative-user-profile--read-only-attributes=foo,bar* ---- For this example, users and administrators would not be able to update attribute `foo`. Users would not be able to edit any attributes starting with the `bar`. diff --git a/docs/documentation/server_admin/topics/user-federation.adoc b/docs/documentation/server_admin/topics/user-federation.adoc index 41d4a1c6b036..baa263f6db1a 100644 --- a/docs/documentation/server_admin/topics/user-federation.adoc +++ b/docs/documentation/server_admin/topics/user-federation.adoc @@ -18,7 +18,7 @@ To add a storage provider, perform the following procedure: .User federation image:images/user-federation.png[User federation] + -. Select the provider type card from the listed cards. +. Choose to add a *Kerberos* or *LDAP* provider + {project_name} brings you to that provider's configuration page. diff --git a/docs/documentation/server_admin/topics/user-federation/ldap.adoc b/docs/documentation/server_admin/topics/user-federation/ldap.adoc index ab0ddb91b6ca..965bc0ba54bd 100644 --- a/docs/documentation/server_admin/topics/user-federation/ldap.adoc +++ b/docs/documentation/server_admin/topics/user-federation/ldap.adoc @@ -17,6 +17,9 @@ image:images/user-federation.png[User federation] . Click *Add LDAP providers*. + {project_name} brings you to the LDAP configuration page. 
++ +.Add LDAP provider +image:images/user-fed-ldap.png[User federation] ==== Storage mode @@ -76,6 +79,14 @@ Toggle this switch to *ON* if you want new users created by {project_name} added Allow Kerberos authentication:: Enable Kerberos/SPNEGO authentication in the realm with user data provisioned from LDAP. For more information, see the <<_kerberos,Kerberos section>>. +Remove invalid users during searches:: +Remove users from the local database if they are not available from the user storage when executing searches. If this is true, users no longer available from their corresponding user storage will be deleted from the local database whenever trying to look up users. If false, then users previously imported from the user storage will be kept in the local database, as read-only and disabled, even if that user is no longer available from the user storage. For example, user was deleted directly from LDAP or the `Users DN` is invalid. Note that this behavior will only happen when the user is not yet cached. + +Relative User Creation DN:: +Relative DN from the `Users DN` where new users will be created. This allows users to be created in a sub-DN of the parent `Users DN` when using a `subtree` search scope. For example, if the `Users DN` is set to `ou=people,dc=myorg,dc=com` and the `Relative User Creation DN` is set to `ou=engineering`, users will be fetched from the `Users DN` and all sub-DNs, but new users will be stored in `ou=engineering,ou=people,dc=myorg,dc=com`. In other words, {project_name} concatenates the `Relative User Creation DN` with the `Users DN` (a comma is added automatically when concatenating the DNs) and uses this resulting DN to store users + +A similar property is also available in the group and role mappers, allowing groups and roles to be added to a sub-DN of the base DN that is used to search for the groups/roles. + Other options:: Hover the mouse pointer over the tooltips in the Admin Console to see more details about these options. @@ -148,7 +159,7 @@ User Attribute mappers that map basic {project_name} user attributes, such as us When {project_name} updates a password, {project_name} sends the password in plain-text format. This action is different from updating the password in the built-in {project_name} database, where {project_name} hashes and salts the password before sending it to the database. For LDAP, {project_name} relies on the LDAP server to hash and salt the password. -By default, LDAP servers such as MSAD, RHDS, or FreeIPA hash and salt passwords. Other LDAP servers such as OpenLDAP or ApacheDS store the passwords in plain-text unless you use the _LDAPv3 Password Modify Extended Operation_ as described in https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.3[RFC3062]. Enable the LDAPv3 Password Modify Extended Operation in the LDAP configuration page. See the documentation of your LDAP server for more details. +By default, LDAP servers such as MSAD, RHDS, or FreeIPA hash and salt passwords. Other LDAP servers such as OpenLDAP store the passwords in plain-text unless you use the _LDAPv3 Password Modify Extended Operation_ as described in https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.3[RFC3062]. Enable the LDAPv3 Password Modify Extended Operation in the LDAP configuration page. See the documentation of your LDAP server for more details. https://directory.apache.org/apacheds/advanced-ug/4.1.1.4-ss-password-hash.html[Configure ApacheDS to hash and salt passwords automatically] by enabling the passwordHashing interceptor. 
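+
+To confirm how a password is actually stored, you can inspect the user entry directly. A minimal sketch using the OpenLDAP client tools follows; the host, bind DN, and user DN are placeholder values:
+
+[source,bash]
+----
+# Bind as an administrative user and read the userPassword attribute of a test user
+ldapsearch -x -H ldap://ldap.example.com -D "cn=admin,dc=example,dc=com" -W \
+  -b "uid=jdoe,ou=people,dc=example,dc=com" userPassword
+
+# The attribute is returned base64-encoded; decode it to check whether the value is hashed (for example, starts with {SSHA})
+echo '<base64 value from the previous output>' | base64 -d
+----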
WARNING: Always verify that user passwords are properly hashed and not stored as plaintext by inspecting a changed directory entry using `ldapsearch` and base64 decode the `userPassword` attribute value. diff --git a/docs/documentation/server_admin/topics/users/con-aia.adoc b/docs/documentation/server_admin/topics/users/con-aia.adoc index 63c60203f09a..b4bc11576bc5 100644 --- a/docs/documentation/server_admin/topics/users/con-aia.adoc +++ b/docs/documentation/server_admin/topics/users/con-aia.adoc @@ -45,14 +45,17 @@ by checking the claims like `acr` in the tokens. In case the user is already authenticated due to an active SSO session, that user usually does not need to actively re-authenticate. However, if that user actively authenticated longer than five minutes ago, the client can still request re-authentication when some AIA is requested. Exceptions exist from this guideline as follows: +* For every required action it is possible to configure the max age on the required action itself in the <>. + If the policy is not configured, it defaults to five minutes. + * The action `delete_account` will always require the user to actively re-authenticate -* The action `update_password` might require the user to actively re-authenticate according to the configured <>. +* The action `UPDATE_PASSWORD` might require the user to actively re-authenticate according to the configured <>. In case the policy is not configured, it is also possible to configure it on the required action itself in the <> when configuring the particular required action. If the policy is not configured in any of those places, it defaults to five minutes. * If you want to use a shorter re-authentication, you can still use a parameter query parameter such as `max_age` with the specified shorter value or eventually `prompt=login`, which will always require user to -actively re-authenticate as described in the OIDC specification. Note that using `max_age` for a longer value than the default five minutes (or the one prescribed by password policy) is not supported. +actively re-authenticate as described in the OIDC specification. Note that using `max_age` for a longer value than the default five minutes (or the one specifically configured for the required action) is not supported. The `max_age` can be currently used only to make the value shorter than the default five minutes. * If <<_step-up-flow,Step-up authentication>> is enabled and the action is to add or delete a credential, authentication is required with the level corresponding @@ -65,7 +68,7 @@ In the same manner, deleting an existing 2nd-factor credential (`otp` or `webaut Some AIA can require the parameter to be sent together with the action name. For instance, the `Delete Credential` action can be triggered only by AIA and it requires a parameter to be sent together with the name of the action, which points to the ID of the removed credential. So the URL for this example would be `kc_action=delete_credential:ce1008ac-f811-427f-825a-c0b878d1c24b`. In this case, the -part after the colon character (`ce1008ac-f811-427f-825a-c0b878d1c24b`) contains the ID of the credential of the particular user, which is to be deleted. The `Delete Credential` action +part after the colon character (`ce1008ac-f811-427f-825a-c0b878d1c24b`) contains the ID of the credential of the particular user, which is to be deleted. The `Delete Credential` action displays the confirmation screen where the user can confirm agreement to delete the credential. 
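+
+For illustration, a complete authorization request that triggers this action could look as follows; the realm, client ID, and redirect URI are placeholder values, and the credential ID is the example used above:
+
+[source,bash]
+----
+https://localhost:8443/realms/myrealm/protocol/openid-connect/auth?client_id=myclient&response_type=code&scope=openid&redirect_uri=https%3A%2F%2Fmyapp.example.com%2Fcallback&kc_action=delete_credential:ce1008ac-f811-427f-825a-c0b878d1c24b
+----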
NOTE: The <<_account-service,{project_name} Account Console>> typically uses the `Delete Credential` action when deleting a 2nd-factor credential. You can check the Account Console for examples if you want @@ -74,5 +77,6 @@ to use this action directly from your own applications. However, relying on the [id="con-aia-available-actions_{context}"] == Available actions -To see all available actions, log in to the Admin Console and go to the top right top corner to click `Realm info` -> tab `Provider info` -> Find provider `required-action` . -But note that this can be further restricted based on what actions are enabled for your realm in the <>. +To see all available actions, log in to the Admin Console and select `master` realm. Then go to the right top corner and click on the name of the user -> select `Realm info` -> tab `Provider info`. Then in the table, find SPI `required-action` . In the +2nd column, there are available providers. Those can be used as values of the `kc_action` parameter (unless parameterized as described above). But note that this can be further restricted based on what actions are enabled for your realm in +the <>. diff --git a/docs/documentation/server_admin/topics/users/proc-enabling-recaptcha.adoc b/docs/documentation/server_admin/topics/users/proc-enabling-recaptcha.adoc index aa9bbee3c211..12cbc03ac0e9 100644 --- a/docs/documentation/server_admin/topics/users/proc-enabling-recaptcha.adoc +++ b/docs/documentation/server_admin/topics/users/proc-enabling-recaptcha.adoc @@ -21,10 +21,10 @@ https://www.google.com/recaptcha/admin/create . Create a reCAPTCHA and choose between Challenge v2 (visible checkbox) or Score-based, v3 (invisible) to get your reCAPTCHA site key and secret. Note them down for future use in this procedure. + -NOTE: The localhost works by default. You do not have to specify a domain. +NOTE: localhost domains are not supported by default. If you wish to continue supporting them for development you can add them to the list of supported domains for your site key. + . Navigate to the {project_name} admin console. -. Click *Authentication* in the menu. +. Click *Authentication* in the menu. . Click the *Flows* tab. . Select *Registration* from the list. . Set the *reCAPTCHA* requirement to *Required*. This enables @@ -43,8 +43,8 @@ image:images/recaptcha-config.png[] + NOTE: In {project_name}, websites cannot include a login page dialog in an iframe. This restriction is to prevent clickjacking attacks. You need to change the default HTTP response headers that is set in {project_name}. + -.. Click *Realm Settings* in the menu. -.. Click the *Security Defenses* tab. +.. Click *Realm Settings* in the menu. +.. Click the *Security Defenses* tab. .. Enter `https://www.google.com` in the field for the *X-Frame-Options* header (or `https//www.recaptcha.net` if you enabled *Use recaptcha.net*). .. Enter `https://www.google.com` in the field for the *Content-Security-Policy* header (or `https//www.recaptcha.net` if you enabled *Use recaptcha.net*). @@ -67,7 +67,7 @@ NOTE: The localhost works by default. You do not have to specify a domain. NOTE: For better security, click on *edit api key* and add an API restriction to restrict the key to the *reCAPTCHA Enterprise API* only. + . Navigate to the {project_name} Admin Console. -. Click *Authentication* in the menu. +. Click *Authentication* in the menu. . Click the *Flows* tab. . Duplicate the "registration" flow. . Bind the new flow to the *Registration flow*. 
diff --git a/docs/documentation/server_admin/topics/vault.adoc b/docs/documentation/server_admin/topics/vault.adoc index b6b63404e51d..83e21b06c742 100644 --- a/docs/documentation/server_admin/topics/vault.adoc +++ b/docs/documentation/server_admin/topics/vault.adoc @@ -33,7 +33,7 @@ All built-in providers support the configuration of key resolvers. A key resolve [source,bash] ---- -kc.[sh|bat] start --spi-vault-file-key-resolvers=REALM_UNDERSCORE_KEY,KEY_ONLY +kc.[sh|bat] start --spi-vault--file--key-resolvers=REALM_UNDERSCORE_KEY,KEY_ONLY ---- The resolvers run in the same order you declare them in the configuration. For each resolver, {project_name} uses the last entry name the resolver produces, which combines the realm with the vault key to search for the vault's secret. If {project_name} finds a secret, it returns the secret. If not, {project_name} uses the next resolver. This search continues until {project_name} finds a non-empty secret or runs out of resolvers. If {project_name} finds no secret, {project_name} returns an empty secret. diff --git a/docs/documentation/server_development/images/empty-user-federation-page.png b/docs/documentation/server_development/images/empty-user-federation-page.png index 5576a174f900..1ff3e73829c9 100644 Binary files a/docs/documentation/server_development/images/empty-user-federation-page.png and b/docs/documentation/server_development/images/empty-user-federation-page.png differ diff --git a/docs/documentation/server_development/images/storage-provider-created.png b/docs/documentation/server_development/images/storage-provider-created.png index 7e5757ce8cba..44e93e31d871 100644 Binary files a/docs/documentation/server_development/images/storage-provider-created.png and b/docs/documentation/server_development/images/storage-provider-created.png differ diff --git a/docs/documentation/server_development/images/user-federation-page.png b/docs/documentation/server_development/images/user-federation-page.png index 787eb9e08aff..595038863db6 100644 Binary files a/docs/documentation/server_development/images/user-federation-page.png and b/docs/documentation/server_development/images/user-federation-page.png differ diff --git a/docs/documentation/server_development/topics/auth-spi.adoc b/docs/documentation/server_development/topics/auth-spi.adoc index 9542a63cdad6..ab74c04ea19b 100644 --- a/docs/documentation/server_development/topics/auth-spi.adoc +++ b/docs/documentation/server_development/topics/auth-spi.adoc @@ -66,7 +66,7 @@ Cookie - ALTERNATIVE Kerberos - ALTERNATIVE Forms subflow - ALTERNATIVE Username/Password Form - REQUIRED - Conditional OTP subflow - CONDITIONAL + Conditional 2FA subflow - CONDITIONAL Condition - User Configured - REQUIRED OTP Form - REQUIRED ---- @@ -115,14 +115,14 @@ Let's walk through the steps from when a client first redirects to keycloak to a A failureChallenge() means that there is a challenge, but that the flow should log this as an error in the error log. This error log can be used to lock accounts or IP Addresses that have had too many login failures. If the username and password is valid, the provider associated the UserModel with the AuthenticationSessionModel and returns a status of success(). -. The next execution is a subflow called Conditional OTP. The executions for this subflow are loaded and the same processing logic occurs. Its Requirement is +. The next execution is a subflow called Conditional 2FA. The executions for this subflow are loaded and the same processing logic occurs. 
Its Requirement is Conditional. This means that the flow will first evaluate all conditional executors that it contains. Conditional executors are authenticators that implement `ConditionalAuthenticator`, and must implement the method `boolean matchCondition(AuthenticationFlowContext context)`. A conditional subflow will call the `matchCondition` method of all conditional executions it contains, and if all of them evaluate to true, it will act as if it was a required subflow. If not, it will act as if it was a disabled subflow. Conditional authenticators are only used for this purpose, and are not used as authenticators. This means that even if the conditional authenticator evaluates to "true", then this will not mark a flow or subflow as successful. For example, a flow containing only a Conditional subflow with only a conditional authenticator will never allow a user to log in. -. The first execution of the Conditional OTP subflow is the Condition - User Configured. +. The first execution of the Conditional 2FA subflow is the Condition - User Configured. This provider requires that a user has been associated with the flow. This requirement is satisfied because the UsernamePassword provider already associated the user with the flow. This provider's `matchCondition` method will evaluate the `configuredFor` method for all other Authenticators in its current subflow. If the subflow contains @@ -134,7 +134,7 @@ Let's walk through the steps from when a client first redirects to keycloak to a Since a user is required for this provider, the provider is also asked if the user is configured to use this provider. If user is not configured, then the flow will then set up a required action that the user must perform after authentication is complete. For OTP, this means the OTP setup page. If the user is configured, he will be asked to enter his otp code. In our scenario, because of the conditional - sub-flow, the user will never see the OTP login page, unless the Conditional OTP subflow is set to Required. + sub-flow, the user will never see the OTP login page, unless the Conditional 2FA subflow is set to Required. . After the flow is complete, the authentication processor creates a UserSessionModel and associates it with the AuthenticationSessionModel. It then checks to see if the user is required to complete any required actions before logging in. . First, each required action's evaluateTriggers() method is called. diff --git a/docs/documentation/server_development/topics/identity-brokering/account-linking.adoc b/docs/documentation/server_development/topics/identity-brokering/account-linking.adoc index 2db64fb37021..02a629066807 100644 --- a/docs/documentation/server_development/topics/identity-brokering/account-linking.adoc +++ b/docs/documentation/server_development/topics/identity-brokering/account-linking.adoc @@ -1,4 +1,5 @@ +[_client-initiated-account-linking] === Client initiated account linking Some applications want to integrate with social providers like Facebook, but do not want to provide an option to login via @@ -13,9 +14,30 @@ back to the server. The server establishes the link and redirects back to the a There are some preconditions that must be met by the client application before it can initiate this protocol: * The desired identity provider must be configured and enabled for the user's realm in the admin console. 
-* The user account must already be logged in as an existing user via the OIDC protocol
 * The user must have an `account.manage-account` or `account.manage-account-links` role mapping.
 * The application must be granted the scope for those roles within its access token
+
+The protocol is realized by the link:{adminguide_link}#con-aia_server_administration_guide[Application-initiated action (AIA)]. If you want the user who is authenticated in your client application to link
+to the identity provider, attach the parameter `kc_action` with the value `idp_link:<identity provider alias>` to the OIDC authentication URL and redirect the user to this URL. For example,
+to request linking to the identity provider with the alias `my-oidc-provider`, attach the parameter as follows:
+
+[source,subs="attributes+"]
+----
+kc_action=idp_link:my-oidc-provider
+----
+
+==== Refreshing external tokens
+
+If you use the external token generated by logging into the provider (such as a Facebook or GitHub token), you can refresh this token by re-initiating the account linking API.
+
+==== Legacy client initiated account linking
+
+WARNING: The legacy client initiated account linking uses a custom protocol that is not based on AIA. If you use this protocol, consider migrating
+your client application to the AIA based protocol described above because legacy client initiated account linking might be removed in future {project_name} versions.
+
+In addition to the preconditions above, the legacy client initiated account linking has another precondition:
+
+* The user account must already be logged in as an existing user via the OIDC protocol
+
 * The application must have access to its access token as it needs information within it to generate the redirect URL.
 To initiate the login, the application must fabricate a URL and redirect the user's browser to this URL. The URL looks like this:
@@ -87,8 +109,3 @@ to the application. If there is an error condition and the auth server deems it
 [WARNING]
 While this API guarantees that the application initiated the request, it does not completely prevent CSRF attacks for this operation. The application is still responsible for guarding against CSRF attacks target at itself.
-
-==== Refreshing external tokens
-
-If you are using the external token generated by logging into the provider (i.e. a Facebook or GitHub token), you can refresh this token by re-initiating the account linking API.
-
diff --git a/docs/documentation/server_development/topics/providers.adoc b/docs/documentation/server_development/topics/providers.adoc
index 30ccaee75c62..e5d36452021d 100644
--- a/docs/documentation/server_development/topics/providers.adoc
+++ b/docs/documentation/server_development/topics/providers.adoc
@@ -91,7 +91,7 @@ For example, to configure a provider you can set options as follows:
 [source,bash]
 ----
-bin/kc.[sh|bat] --spi-theme-selector-my-theme-selector-enabled=true --spi-theme-selector-my-theme-selector-theme=my-theme
+bin/kc.[sh|bat] --spi-theme-selector--my-theme-selector--enabled=true --spi-theme-selector--my-theme-selector--theme=my-theme
 ----
 Then you can retrieve the config in the `ProviderFactory` init method:
@@ -122,6 +122,44 @@ public class MyThemeSelectorProvider implements ThemeSelectorProvider {
 }
 ----
+The pom.xml file for your SPI requires a `dependencyManagement` section with an import reference to the {project_name} version that is intended for the SPI. In this example, replace the occurrence of `VERSION` with {project_versionMvn}, which is the current version of {project_name}.
+
+[source,xml]
+----
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>org.example</groupId>
+    <artifactId>test-lib</artifactId>
+    <version>1.0-SNAPSHOT</version>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.keycloak</groupId>
+                <artifactId>keycloak-parent</artifactId>
+                <version>VERSION</version> <!--1-->
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.keycloak</groupId>
+            <artifactId>keycloak-model-jpa</artifactId>
+            <scope>provided</scope>
+        </dependency>
+    </dependencies>
+</project>
+----
+<1> Replace `VERSION` with the current version of {project_name}
+
 [[_override_builtin_providers]]
 ==== Override built-in providers
@@ -196,7 +234,7 @@ one of them needs to be specified as the default one. For example such as:
 [source,bash]
 ----
-bin/kc.[sh|bat] build --spi-hostname-provider=default
+bin/kc.[sh|bat] build --spi-hostname--provider=default
 ----
 The value `default` used as the value of `default-provider` must match the ID returned by the `ProviderFactory.getId()` of the particular provider factory implementation.
@@ -224,14 +262,14 @@ After registering new providers or dependencies Keycloak needs to be re-built wi
 [NOTE]
 ====
-Provider JARs are not loaded in isolated classloaders, so do not include resources or classes in your provider JARs that conflict with built-in resources or classes.
-In particular the inclusion of an application.properties file or overriding the commons-lang3 dependency will cause auto-build to fail if the provider JAR is removed.
+Provider JARs are not loaded in isolated classloaders, so do not include resources or classes in your provider JARs that conflict with built-in resources or classes.
+In particular the inclusion of an application.properties file or overriding the commons-lang3 dependency will cause auto-build to fail if the provider JAR is removed.
 If you have included conflicting classes, you may see a split package warning in the start log for the server. Unfortunately not all built-in lib jars are checked by the split package warning logic, so you'll need to check the lib directory JARs before bundling or including a transitive dependency. Should there be a conflict, that can be resolved by removing or repackaging the offending classes.
-There is no warning if you have conflicting resource files. You should either ensure that your JAR's resource files have path names that contain something unique to that provider,
+There is no warning if you have conflicting resource files. You should either ensure that your JAR's resource files have path names that contain something unique to that provider,
 or you can check for the existence of `some.file` in the JAR contents under the `"install root"/lib/lib/main` directory with something like:
-
+
 [source,bash]
 ----
 find . -type f -name "*.jar" -exec unzip -l {} \; | grep some.file
 ----
@@ -245,7 +283,7 @@ If you find that your server will not start due to a `NoSuchFileException` error
 ----
 This will force Quarkus to rebuild the classloading related index files. From there you should be able to perform a non-optimized start or build without an exception.
-==== +==== ==== Disabling a provider @@ -255,7 +293,7 @@ For example to disable the Infinispan user cache provider use: [source,bash] ---- -bin/kc.[sh|bat] build --spi-user-cache-infinispan-enabled=false +bin/kc.[sh|bat] build --spi-user-cache--infinispan--enabled=false ---- [[_script_providers]] diff --git a/docs/documentation/server_development/topics/themes-react.adoc b/docs/documentation/server_development/topics/themes-react.adoc index 6e71b3c3d9eb..c922522c473f 100644 --- a/docs/documentation/server_development/topics/themes-react.adoc +++ b/docs/documentation/server_development/topics/themes-react.adoc @@ -34,15 +34,15 @@ import { KeycloakProvider } from "@keycloak/keycloak-ui-shared"; realm: "master", clientId: "security-admin-console" }}> - {/* rest of you application */} + {/* rest of your application */} ---- === Translating the pages The pages are translated using the `i18next` library. -You can set it up as described on their [website](https://react.i18next.com/). -If you want to use the translations that are provided then you need to add i18next-http-backend to your project and add: +You can set it up as described on their https://react.i18next.com/[website]. +If you want to use the translations that are provided then you need to add `i18next-fetch-backend` to your project and add: [source,javascript] ---- @@ -51,9 +51,9 @@ backend: { parse: (data: string) => { const messages = JSON.parse(data); - const result: Record = {}; - messages.forEach((v) => (result[v.key] = v.value)); //need to convert to record - return result; + return Object.fromEntries( + messages.map(({ key, value }) => [key, value]) + ); }, }, ---- @@ -61,5 +61,5 @@ backend: { === Using the pages All "pages" are React components that can be used in your application. -To see what components are available, see the [source](https://github.com/keycloak/keycloak/blob/main/js/apps/account-ui/src/index.ts). -Or have a look at the [quick start](https://github.com/keycloak/keycloak-quickstarts/tree/main/extension/extend-account-console-node) to see how to use them. +To see what components are available, see the https://github.com/keycloak/keycloak/blob/main/js/apps/account-ui/src/index.ts[source]. +Or have a look at the https://github.com/keycloak/keycloak-quickstarts/tree/main/extension/extend-account-console-node[quick start] to see how to use them. diff --git a/docs/documentation/server_development/topics/themes.adoc b/docs/documentation/server_development/topics/themes.adoc index 41c3d69c15f2..d52feea014bc 100644 --- a/docs/documentation/server_development/topics/themes.adoc +++ b/docs/documentation/server_development/topics/themes.adoc @@ -32,13 +32,13 @@ NOTE: To set the theme for the `master` Admin Console you need to set the Admin + . To see the changes to the Admin Console refresh the page. -. Change the welcome theme by using the `spi-theme-welcome-theme` option. +. Change the welcome theme by using the `spi-theme--welcome-theme` option. . For example: + [source,bash] ---- -bin/kc.[sh|bat] start --spi-theme-welcome-theme=custom-theme +bin/kc.[sh|bat] start --spi-theme--welcome-theme=custom-theme ---- [[_default-themes]] @@ -74,7 +74,7 @@ restarting {project_name}. + [source,bash] ---- -bin/kc.[sh|bat] start --spi-theme-static-max-age=-1 --spi-theme-cache-themes=false --spi-theme-cache-templates=false +bin/kc.[sh|bat] start --spi-theme--static-max-age=-1 --spi-theme--cache-themes=false --spi-theme--cache-templates=false ---- . Create a directory in the `themes` directory. 
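As a rough illustration of this step, a custom login theme could be laid out as follows; the theme name and stylesheet are made up, and the `parent` and `styles` keys are described below:

[source,bash]
----
# Skeleton for a custom login theme that extends the built-in keycloak theme
mkdir -p themes/my-theme/login/resources/css
cat > themes/my-theme/login/theme.properties << 'EOF'
parent=keycloak
styles=css/my-styles.css
EOF
----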
@@ -123,11 +123,14 @@ It can be useful for instance if you redeployed custom providers or custom theme Theme properties are set in the file `/theme.properties` in the theme directory. -* parent - Parent theme to extend -* import - Import resources from another theme -* common - Override the common resource path. The default value is `common/keycloak` when not specified. This value would be used as value of suffix of `${url.resourcesCommonPath}`, which is used typically in freemarker templates (prefix of `${url.resoucesCommonPath}` value is theme root uri). -* styles - Space-separated list of styles to include -* locales - Comma-separated list of supported locales +`parent`:: Parent theme to extend +`import`:: Import resources from another theme +`common`:: Override the common resource path. The default value is `common/keycloak` when not specified. This value would be used as value of suffix of `${url.resourcesCommonPath}`, which is used typically in freemarker templates (prefix of `${url.resoucesCommonPath}` value is theme root uri). +`styles`:: Space-separated list of styles to include +`locales`:: Comma-separated list of supported locales +`contentHashPattern`:: Regex pattern of a file path in the theme where files have a content hash as part of their file name. +A content hash is usually an abbreviated hash of the file's contents. The hash will change when the contents of the file have changed, and is usually created using the bundling process of the JavaScript application bundling. +When the preview feature `rolling-updates:v2` is enabled, this allows for a more seamless rolling upgrade. There are a list of properties that can be used to change the css class used for certain element types. For a list of these properties look at the theme.properties file in the corresponding type of the keycloak theme (`themes/keycloak//theme.properties`). @@ -256,7 +259,7 @@ An example for a custom `footer.ftl` may look like this:
  • About
  • Contact
    ``` diff --git a/docs/documentation/server_development/topics/user-storage.adoc b/docs/documentation/server_development/topics/user-storage.adoc index 9466e3a009c9..1b8e20eb2568 100644 --- a/docs/documentation/server_development/topics/user-storage.adoc +++ b/docs/documentation/server_development/topics/user-storage.adoc @@ -19,10 +19,10 @@ There are two sample projects in link:{quickstartRepo_link}[{quickstartRepo_name |=== |Name |Description -| {quickstartRepo_link}/tree/latest/extension/user-storage-jpa[user-storage-jpa] +| {quickstartRepo_link}/tree/main/extension/user-storage-jpa[user-storage-jpa] | Demonstrates implementing a user storage provider using JPA. -| {quickstartRepo_link}/tree/latest/extension/user-storage-simple[user-storage-simple] +| {quickstartRepo_link}/tree/main/extension/user-storage-simple[user-storage-simple] | Demonstrates implementing a user storage provider using a simple properties file that contains username/password key pairs. |=== diff --git a/docs/documentation/server_development/topics/user-storage/javaee.adoc b/docs/documentation/server_development/topics/user-storage/javaee.adoc index 85d03599f4b2..4fb8dc328077 100644 --- a/docs/documentation/server_development/topics/user-storage/javaee.adoc +++ b/docs/documentation/server_development/topics/user-storage/javaee.adoc @@ -8,7 +8,7 @@ endif::[] Therefore, the User Storage Providers cannot be packaged within any Jakarta EE component or make it an EJB as was the case when Keycloak ran over WildFly in previous versions. -Providers implementations are required to be plain java objects which implement the suitable User Storage SPI interfaces, as was explained in the previous sections. They must be packaged and deployed as stated in the Migration Guide. +Providers implementations are required to be plain java objects which implement the suitable User Storage SPI interfaces, as was explained in the previous sections. They must be packaged and deployed as stated in the Migration Guide. ifeval::[{project_community}==true] See https://www.keycloak.org/migration/migrating-to-quarkus#_migrating_custom_providers[Migrating custom providers]. endif::[] @@ -18,6 +18,6 @@ endif::[] You can still implement your custom `UserStorageProvider` class, which is able to integrate an external database by JPA Entity Manager, as shown in this example: - - {quickstartRepo_link}/tree/latest/extension/user-storage-jpa + - {quickstartRepo_link}/tree/main/extension/user-storage-jpa CDI is not supported. diff --git a/docs/documentation/server_development/topics/user-storage/simple-example.adoc b/docs/documentation/server_development/topics/user-storage/simple-example.adoc index aef44706d9f3..aa5c70f16f79 100644 --- a/docs/documentation/server_development/topics/user-storage/simple-example.adoc +++ b/docs/documentation/server_development/topics/user-storage/simple-example.adoc @@ -213,7 +213,7 @@ For example, by running the server with the following argument: [source,bash] ---- -kc.[sh|bat] start --spi-storage-readonly-property-file-path=/other-users.properties +kc.[sh|bat] start --spi-storage--readonly-property-file--path=/other-users.properties ---- We can specify the classpath of the user property file instead of hardcoding it. 
Then you can retrieve the configuration in the `PropertyFileUserStorageProviderFactory.init()`: diff --git a/docs/documentation/tests/pom.xml b/docs/documentation/tests/pom.xml index a8d4ec97ce72..f444f03bff9a 100644 --- a/docs/documentation/tests/pom.xml +++ b/docs/documentation/tests/pom.xml @@ -8,6 +8,7 @@ --add-opens=java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.io=ALL-UNNAMED + 1.18.3 @@ -129,6 +130,12 @@ 5.0.1 test + + org.jsoup + jsoup + ${version.jsoup} + test + diff --git a/docs/documentation/tests/src/test/java/org/keycloak/documentation/test/utils/HttpUtils.java b/docs/documentation/tests/src/test/java/org/keycloak/documentation/test/utils/HttpUtils.java index c643b1d6ea84..60813be6defa 100644 --- a/docs/documentation/tests/src/test/java/org/keycloak/documentation/test/utils/HttpUtils.java +++ b/docs/documentation/tests/src/test/java/org/keycloak/documentation/test/utils/HttpUtils.java @@ -10,6 +10,7 @@ import org.apache.hc.client5.http.impl.io.PoolingHttpClientConnectionManagerBuilder; import org.apache.hc.core5.http.ClassicHttpResponse; import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHeaders; import org.apache.hc.core5.http.HttpStatus; import org.apache.hc.core5.http.ParseException; import org.apache.hc.core5.http.io.HttpClientResponseHandler; @@ -77,6 +78,9 @@ public String handleResponse(ClassicHttpResponse r) throws IOException { }; try { + // add common headers that are needed by some pages + method.addHeader(HttpHeaders.ACCEPT_LANGUAGE, "en-US,en;q=0.9"); + method.addHeader(HttpHeaders.ACCEPT, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"); client.execute(method, responseHandler); } catch (Exception e) { response.setError("exception " + e.getMessage()); diff --git a/docs/documentation/tests/src/test/java/org/keycloak/documentation/test/utils/LinkUtils.java b/docs/documentation/tests/src/test/java/org/keycloak/documentation/test/utils/LinkUtils.java index 332faa650db0..ed8a576d9e4b 100644 --- a/docs/documentation/tests/src/test/java/org/keycloak/documentation/test/utils/LinkUtils.java +++ b/docs/documentation/tests/src/test/java/org/keycloak/documentation/test/utils/LinkUtils.java @@ -1,6 +1,9 @@ package org.keycloak.documentation.test.utils; import org.apache.commons.io.FileUtils; +import org.jsoup.Jsoup; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; import org.keycloak.documentation.test.Config; import org.keycloak.documentation.test.Guide; @@ -68,15 +71,35 @@ public List findInvalidLinks(Guide guide) throws IOException { String anchor = link.contains("#") ? link.split("#")[1] : null; String error = null; - HttpUtils.Response response = anchor != null ? http.load(link) : http.isValid(link); + HttpUtils.Response response = http.load(link); if (response.getRedirectLocation() != null) { if (!validRedirect(response.getRedirectLocation(), Config.getInstance().getIgnoredLinkRedirects())) { error = "invalid redirect to " + response.getRedirectLocation(); } - } else if (response.isSuccess() && anchor != null) { - if (!(response.getContent().contains("id=\"" + anchor + "\"") || response.getContent().contains("name=\"" + anchor + "\"") || response.getContent().contains("href=\"#" + anchor + "\""))) { - error = "invalid anchor " + anchor; + } else if (response.isSuccess()) { + if (response.getContent().contains("http-equiv")) { + // The contains() will scan the document fast, while Jsoup parse will take extra CPU cycles to parse the document. 
+ // Using Jsoup avoid parsing getting false positives from the document's contents. + Document doc = Jsoup.parse(response.getContent()); + Element refresh = doc.selectFirst("head > meta[http-equiv=refresh]"); + if (refresh != null) { + String content = refresh.attribute("content").getValue(); + if (content.contains(";")) { + String url = content.substring(content.indexOf(";") + 1).trim(); + if (url.startsWith("url=")) { + url = url.substring("url=".length()).trim(); + if (!validRedirect(url, Config.getInstance().getIgnoredLinkRedirects())) { + error = "invalid redirect to " + url; + } + } + } + } + } + if (anchor != null) { + if (!(response.getContent().contains("id=\"" + anchor + "\"") || response.getContent().contains("name=\"" + anchor + "\"") || response.getContent().contains("href=\"#" + anchor + "\""))) { + error = "invalid anchor " + anchor; + } } } else { error = response.getError(); diff --git a/docs/documentation/tests/src/test/resources/ignored-links b/docs/documentation/tests/src/test/resources/ignored-links index 48f763901109..b77d682d54a1 100644 --- a/docs/documentation/tests/src/test/resources/ignored-links +++ b/docs/documentation/tests/src/test/resources/ignored-links @@ -39,7 +39,9 @@ https://stackapps.com/apps/oauth/register # Failing because of broken certificate, can likely be restored later. https://docs.kantarainitiative.org* https://saml.xml.org* -# To be removed once KC 26.1 has been released -https://www.keycloak.org/server/logging#_configuring_levels_as_individual_options -https://www.keycloak.org/observability/* -https://www.keycloak.org/high-availability/concepts-memory-and-cpu-sizing#_measuring_the_activity_of_a_running_keycloak_instance \ No newline at end of file +# To be removed once KC 26.2 has been released +https://www.keycloak.org/server/caching#_securing_transport_stacks +https://www.keycloak.org/observability/grafana-dashboards +https://www.keycloak.org/securing-apps/token-exchange* +https://www.keycloak.org/operator/rolling-updates +https://www.keycloak.org/server/update-compatibility#rolling-updates-for-patch-releases \ No newline at end of file diff --git a/docs/documentation/topics/templates/document-attributes.adoc b/docs/documentation/topics/templates/document-attributes.adoc index 7b02f0e4df16..3d503401f975 100644 --- a/docs/documentation/topics/templates/document-attributes.adoc +++ b/docs/documentation/topics/templates/document-attributes.adoc @@ -48,12 +48,16 @@ :adminguide_bruteforce_link: {adminguide_link}#password-guess-brute-force-attacks :adminguide_eventlistener_name: Event listener :adminguide_eventlistener_link: {adminguide_link}#event-listener +:adminguide_finegrained_name: fine-grained admin permissions +:adminguide_finegrained_link: {adminguide_link}#_fine_grained_permissions :adminguide_timeouts_name: Timeouts :adminguide_timeouts_link: {adminguide_link}#_timeouts :adminguide_clearcache_name: Clearing Server Caches :adminguide_clearcache_link: {adminguide_link}#_clear-cache :apidocs_name: API Documentation :apidocs_link: https://www.keycloak.org/docs/{project_version}/api_documentation/ +:adminguide_email_name: Configuring email for a realm +:adminguide_email_link: {adminguide_link}#_email :bootstrapadminrecovery_name: Admin Bootstrap and Recovery :bootstrapadminrecovery_link: https://www.keycloak.org/server/bootstrap-admin-recovery :client_certificate_lookup_link: https://www.keycloak.org/server/reverseproxy#_enabling_client_certificate_lookup @@ -70,10 +74,12 @@ :gettingstarted_name_short: Getting Started :gettingstarted_link: 
https://www.keycloak.org/guides#getting-started :gettingstarted_link_latest: https://www.keycloak.org/guides#getting-started +:grafanadashboards_link: https://www.keycloak.org/observability/grafana-dashboards :highavailabilityguide_name: High Availability Guide :highavailabilityguide_link: https://www.keycloak.org/guides#high-availability +:observablitycategory_link: https://www.keycloak.org/guides#observability :tracingguide_name: Enabling Tracing -:tracingguide_link: https://www.keycloak.org/server/tracing +:tracingguide_link: https://www.keycloak.org/observability/tracing :upgradingguide_name: Upgrading Guide :upgradingguide_name_short: Upgrading :upgradingguide_link: {project_doc_base_url}/upgrading/ @@ -114,9 +120,6 @@ :jdgserver_name: Infinispan :jdgserver_version: 9.4.19 -:jdgserver_version_latest: 11.0.9 -:jdgserver_crossdcdocs_link: https://infinispan.org/docs/11.0.x/titles/xsite/xsite.html -:jdgserver_version_latest: 11.0.8 :subsystem_undertow_xml_urn: urn:jboss:domain:undertow:12.0 :subsystem_infinispan_xml_urn: urn:jboss:domain:infinispan:12.0 @@ -128,5 +131,8 @@ :securing_apps_name: Securing applications Guides :securing_apps_name_short: Securing applications :securing_apps_link: https://www.keycloak.org/guides#securing-apps -:securing_apps_java_policy_enforcer_link: https://www.keycloak.org/securing-apps/policy-enforcer +:securing_apps_base_link: https://www.keycloak.org/securing-apps +:securing_apps_java_policy_enforcer_link: {securing_apps_base_link}/policy-enforcer :securing_apps_java_policy_enforcer_name: Java Policy enforcer +:securing_apps_token_exchange_link: {securing_apps_base_link}/token-exchange +:securing_apps_token_exchange_name: Token exchange Documentation diff --git a/docs/documentation/upgrading/topics/changes/changes-19_0_0.adoc b/docs/documentation/upgrading/topics/changes/changes-19_0_0.adoc index 8396b5225236..7766b6aa084b 100644 --- a/docs/documentation/upgrading/topics/changes/changes-19_0_0.adoc +++ b/docs/documentation/upgrading/topics/changes/changes-19_0_0.adoc @@ -61,7 +61,7 @@ Apart from disabling the /q/ endpoints, these are the other improvements made to * The `health/ready` endpoint used for readiness probes still checks for a working database connection. Make sure you have not only `health-enabled=true` but also `metrics-enabled=true` set in your configuration, to enable the database check, resulting in an effective readiness probe. It will return HTTP status-code 503 and a status of DOWN when the database connection is not in a healthy state. Expect more enhancements in this area in the future. -For more information, see the https://www.keycloak.org/server/health[Health guide] +For more information, see the https://www.keycloak.org/observability/health[Health guide] = Changes using GELF / centralized log management diff --git a/docs/documentation/upgrading/topics/changes/changes-21_0_0.adoc b/docs/documentation/upgrading/topics/changes/changes-21_0_0.adoc index 443849b6248f..7aafb2d0bf30 100644 --- a/docs/documentation/upgrading/topics/changes/changes-21_0_0.adoc +++ b/docs/documentation/upgrading/topics/changes/changes-21_0_0.adoc @@ -1,7 +1,7 @@ = Keycloak uses Micrometer for metrics Keycloak provides an optional a metrics endpoint which exports metrics in the Prometheus format. -In this release the implementation to provide this data switched from SmallRye to Micrometer, which is the https://quarkus.io/guides/micrometer/[recommended metrics library for Quarkus]. 
+In this release the implementation to provide this data switched from SmallRye to Micrometer, which is the https://quarkus.io/guides/telemetry-micrometer[recommended metrics library for Quarkus]. Due to this change, metrics have been renamed. The following table shows some examples. @@ -52,9 +52,9 @@ backward compatibility mode is planned to be removed in Keycloak 24. = Deprecated methods from user session provider were removed -In Keycloak 13 there was introduced `UserLoginFailureProvider` and some methods from -`UserSessionProvider` were moved there. The methods in `UserSessionProvider` were deprecated -and now has been removed. Javadoc of these methods contained a corresponding replacement +In Keycloak 13 there was introduced `UserLoginFailureProvider` and some methods from +`UserSessionProvider` were moved there. The methods in `UserSessionProvider` were deprecated +and now has been removed. Javadoc of these methods contained a corresponding replacement (see Javadoc of Keycloak 20 release). = Custom themes using old admin console won't work diff --git a/docs/documentation/upgrading/topics/changes/changes-26_0_0.adoc b/docs/documentation/upgrading/topics/changes/changes-26_0_0.adoc index 28398e4ddf78..996ca2934428 100644 --- a/docs/documentation/upgrading/topics/changes/changes-26_0_0.adoc +++ b/docs/documentation/upgrading/topics/changes/changes-26_0_0.adoc @@ -366,7 +366,7 @@ If you are using a bundler like Vite or Webpack nothing changes, you'll have the ---- -You can also opt to use an link:https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script/type/importmap[import map] make the import of the library less verbose: +You can also opt to use an link:https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Elements/script/type/importmap[import map] make the import of the library less verbose: [source,html] ---- diff --git a/docs/documentation/upgrading/topics/changes/changes-26_1_3.adoc b/docs/documentation/upgrading/topics/changes/changes-26_1_3.adoc new file mode 100644 index 000000000000..218186baa73b --- /dev/null +++ b/docs/documentation/upgrading/topics/changes/changes-26_1_3.adoc @@ -0,0 +1,11 @@ +== Notable changes + +Notable changes where an internal behavior changed to prevent common misconfigurations, fix bugs or simplify running {project_name}. + +=== Send Reset Email force login again for federated users after reset credentials + +Previously the reset credentials flow (*forgot password* feature) kept the user logged in after the reset credentials if the same authentication session (same browser) was used. For federated user providers this behavior can be a security issue. Imagine a provider implementation that detects the user as *enabled*, performs the password change successfully but the validation of the user password fails for some reason. In this scenario the reset credentials flow allowed a user to be logged in after the successful password change that would have not been allowed to login using the normal browser flow. This scenario is not a common case but should be avoided by default. 
+ +For this reason now the authenticator `reset-credential-email` (*Send Reset Email*) has a new configuration option called `force-login` (*Force login after reset*) with values `true` (always force the login), `false` (previous behavior that keeps the user logged in if the same authentication session is used), and `only-federated` (default value that forces federated users to authenticate again and keeps previous behavior for users stored in {project_name}'s internal database). + +For more information about changing this option, see link:{adminguide_link}#enabling-forgot-password[Enable forgot password]. \ No newline at end of file diff --git a/docs/documentation/upgrading/topics/changes/changes-26_2_0.adoc b/docs/documentation/upgrading/topics/changes/changes-26_2_0.adoc new file mode 100644 index 000000000000..f0bbc83e478f --- /dev/null +++ b/docs/documentation/upgrading/topics/changes/changes-26_2_0.adoc @@ -0,0 +1,188 @@ +== Breaking changes + +Breaking changes are identified as requiring changes from existing users to their configurations. + +=== Changes to port behaviour with the `X-Forwarded-Host` header + +The `X-Forwarded-Host` header can optionally also contain the port. In previous versions when the port was omitted from the header, +{project_name} fell back to the actual request port. For example if {project_name} was listening on port 8080 and the request contained +`X-Forwarded-Host: example.com` header, the resolved URL was `+http://example.com:8080+`. + +This is now changed and omitting the port results in removing it from the resolved URL. The resolved URL from the previous example +would now be `+http://example.com+`. + +To mitigate that, either make your reverse proxy include the port in the `X-Forwarded-Host` header or configure it to set +the `X-Forwarded-Port` header with the desired port. + +=== Changes to installing Oracle JDBC driver + +The required JAR for the Oracle JDBC driver that needs to be explicitly added to the distribution has changed. +Instead of providing `ojdbc11` JAR, use `ojdbc17` JAR as stated in the https://www.keycloak.org/server/db#_installing_the_oracle_database_driver[Installing the Oracle Database driver] guide. + +=== H2 Credentials + +With this version, the default H2 based `dev-file` database changed its credentials. While migrating from an instance using this dev only database is not supported, you may be able to continue to use your existing H2 database if you explicitly provide the old defaults for the database username and password. For example in the `keycloak.conf` specify: + +[example] +==== +db-username=sa + +db-password=password +==== + +=== JWT Client authentication aligned with the latest OIDC specification + +The latest draft version of the link:https://openid.net/specs/openid-connect-core-1_0-36.html#rfc.section.9[OpenID Connect core specification] changed the rules for +audience validation in JWT client assertions for the Client Authentication methods `private_key_jwt` and `client_secret_jwt`. + +Previously, the `aud` claim of a JWT client assertion was loosely defined as `The Audience SHOULD be the URL of the Authorization Server's Token Endpoint`, which did not exclude the usage of other URLs. + +The revised OIDC Core specification uses a stricter audience check: `The Audience value MUST be the OP's Issuer Identifier passed as a string, and not a single-element array.`. 
+ +We adapted the JWT client authentication authenticators of both `private_key_jwt` and `client_secret_jwt` to allow only a single audience in the token by default. For now, the audience can be +issuer, token endpoint, introspection endpoint or some other OAuth/OIDC endpoint, which is used by client JWT authentication. However since there is single audience allowed now, it means that it is not possible +to use other unrelated audience values, which is to make sure that JWT token is really only useful by the {project_name} for client authentication. + +This strict audience check can be reverted to the previous more lenient check with a new option of OIDC login protocol SPI. It will be still allowed to use multiple audiences in JWT if server is started with the option: + +`--spi-login-protocol-openid-connect-allow-multiple-audiences-for-jwt-client-authentication=true` + +Note that this option might be removed in the future. Possibly in {project_name} 27. So it is highly recommended to update your clients to use single audience instead of using this option. It is also +recommended that your clients use the issuer URL for the audience when sending JWT for client authentication as that is going to be compatible with the future version of OIDC specification. + +== Notable changes + +Notable changes where an internal behavior changed to prevent common misconfigurations, fix bugs or simplify running {project_name}. + +=== `proxy-trusted-addresses` enforced for built-in X509 client certificate lookup providers + +Built-in X.509 client certificate lookup providers now reflect the `proxy-trusted-addresses` config option. A certificate provided through the HTTP headers will now be processed only if the proxy is trusted, or `proxy-trusted-addresses` is unset. + +=== Zero-configuration secure cluster communication + +For clustering multiple nodes, {project_name} uses distributed caches. +Starting with this release for all TCP-based transport stacks, the communication between the nodes is encrypted with TLS and secured with automatically generated ephemeral keys and certificates. + +If you are not using a TCP-based transport stack, it is recommended to migrate to the `jdbc-ping` transport stack to benefit from the simplified configuration and enhanced security. + +If you provided your own keystore and truststore to secure the TCP transport stack communication in previous releases, it is now recommended to migrate to the automatically generated ephemeral keys and certificates to benefit from the simplified setup. + +If you are using a custom transport stack, this default behavior can be disabled by setting the option `cache-embedded-mtls-enabled` to `false`. + +If you are using a service mesh, configure it to allow direct mTLS communication between the {project_name} Pods. + +For more information, check the link:https://www.keycloak.org/server/caching#_securing_transport_stacks[Securing Transport Stacks] in the distributed caches guide. + +=== Operator creates NetworkPolicies to restrict traffic + +The {project_name} Operator now creates by default a NetworkPolicy to restrict traffic to internal ports used for {project_name}'s distributed caches. + +This strengthens a secure-by-default setup and minimizes the configuration steps of new setups. +We expect this to be backwards compatible to existing deployment, so no additional steps are necessary at the time of the upgrade. +You can return to the previous behavior by disabling the creation of NetworkPolicies in the Keycloak CR. 
+ +If your deployment scripts add explicit NetworkPolicies for {project_name}, you should consider removing those and migrate to the new functionality provided in the Keycloak CR as a follow-up to the upgrade. + +Read more about this in the https://www.keycloak.org/operator/advanced-configuration[Operator Advanced configuration]. + +=== Supported standard token exchange + +In this release, {project_name} added support for the link:{securing_apps_token_exchange_link}#_standard-token-exchange[Standard token exchange] (Feature `token-exchange-standard:v2`). In the past {project_name} releases, +{project_name} had only a preview token exchange feature, which is now referred to as link:{securing_apps_token_exchange_link}#_legacy-token-exchange[Legacy token exchange] (Feature `token-exchange:v1`). +The legacy token exchange is still in preview and it works the same way as in previous releases. If you used the link:{securing_apps_token_exchange_link}#_internal-token-to-internal-token-exchange[internal-internal token exchange], +consider migrating to the new standard token exchange. + +If you prefer to continue using the legacy token exchange, you will find it operates as in previous releases. No need exists to disable the standard token exchange feature. Your clients will use the standard token exchange only if it is enabled on the {project_name} client. However, migration to the standard token exchange is recommended. It is the officially supported method and the priority for enhancements. + +Consider the following notes as you plan for migration to the new standard token exchange: + +* The feature `token-exchange-standard`, which represents the new Standard token exchange, is enabled by default. It is recommended to +disable the `token-exchange` feature, which represents the Legacy token exchange, to make sure that requests will be served by the new standard token exchange. + +* You can have both the standard and legacy token exchange features enabled, which can be useful if you need to cover standard use cases (internal-internal) together with the other token exchange use cases that are implemented only by legacy token exchange. For instance, link:{securing_apps_token_exchange_link}#_external-token-to-internal-token-exchange[external to internal token exchange] is implemented only by the +legacy token exchange. In this case, {project_name} serves the standard internal-to-internal requests preferably by the standard token exchange while the other requests are served by the legacy token exchange. The choice of standard or legacy token exchange is determined based on the +parameters of the particular request. For example, requests containing non-standard parameters such as `requested_issuer` or `requested_subject` are considered legacy. ++ +If you still need legacy token exchange, you also need link:{adminguide_link}#_fine_grained_permissions[Fine-grained admin permissions version 1] enabled (FGAP:v1) because +link:{adminguide_link}#_fine_grained_permissions[version 2 (FGAP:v2)] does not have support for token exchange permissions. This is on purpose +because token-exchange is conceptually not really an "admin" permission and therefore token exchange permissions were not added to FGAP:v2. + +* Standard token exchange requires enabling a switch on the client as described in the link:{securing_apps_token_exchange_link}#_standard-token-exchange-enable[{securing_apps_token_exchange_name}]. 
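To make the shape of a standard request concrete, the following is a hedged sketch of an internal-to-internal exchange against the token endpoint; the host, realm, client identifiers, and secrets are placeholders, and the grant and token-type URNs come from RFC 8693:

[source,bash]
----
curl -s -X POST "https://keycloak.example.com/realms/myrealm/protocol/openid-connect/token" \
  -d "grant_type=urn:ietf:params:oauth:grant-type:token-exchange" \
  -d "client_id=requester-client" \
  -d "client_secret=$CLIENT_SECRET" \
  -d "subject_token=$SUBJECT_TOKEN" \
  -d "subject_token_type=urn:ietf:params:oauth:token-type:access_token" \
  -d "audience=target-client"
----

The response is a regular token response whose contents depend on the client scopes of the requesting client, as described in the paragraphs that follow.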
+ +Consider these additional changes in the behavior of the two types of token exchange: + +* Fine-grained admin permissions are no longer needed or supported for the standard token exchange. + +* The most notable change regarding the behavior of scopes and audiences is that the applied client scopes are based on the client triggering the token exchange request rather than the "target" client specified by the `audience` parameter. +Support exists for multiple values of the `audience` parameter as mentioned in the specification. The details are described in the link:{securing_apps_token_exchange_link}#_standard-token-exchange-scope[{securing_apps_token_exchange_name}]. + +* Public clients are no longer allowed to send the token exchange requests. Legacy token exchange allowed public clients to exchange tokens with themselves to downscope the original token. This use case can +instead be covered by using the refresh token grant, in which the `scope` parameter can be used to downscope the refreshed access token, as mentioned in +the https://datatracker.ietf.org/doc/html/rfc6749#section-6[OAuth2 specification]. + +* Exchanging an access token for a SAML assertion is not supported in this release. In other words, using `requested_token_type=urn:ietf:params:oauth:token-type:saml2` is not supported. + +* Exchanging an access token for a refresh token is allowed only if it is explicitly enabled on the client as mentioned in the link:{securing_apps_token_exchange_link}#_standard-token-exchange-details[{securing_apps_token_exchange_name}]. +Currently, it is not supported to request offline tokens or exchange a refresh token when the subject token was issued from an offline session. The recommended approach is to exchange for access tokens instead of +refresh token when possible. + +=== Fine-grained admin permissions supported + +Starting with this release, {project_name} introduces *fine-grained admin permissions V2*, offering an improved and more flexible authorization model for administrative permissions. + +* FGAP:V2 feature is enabled by default. +* FGAP:V1 feature remains in preview and can be enabled using `--features=admin-fine-grained-authz:v1`. However, V1 may be deprecated and removed in a future releases. + +==== Migration from V1 to V2 + +Due to fundamental changes in the permission model, **automatic migration from V1 to V2 is not available**. To simplify the transition: + +* A new `admin-permissions` client is introduced. This client is created when you enable the capability for the realm. The client holds the authorization model for FGAP:V2. +* The existing FGAP:V1 authorization model remains unchanged within the `realm-management` client. +* Administrators must _recreate permissions and policies_ using the new model, which can be configured in the updated *Permissions* section of the Admin Console. + +==== Key Differences Between FGAP:V1 and FGAP:V2 + +* Realm-level enablement: + ** FGAP:V2 can be enabled for a realm using the new *Admin Permissions* switch in *Realm Settings*. +* Centralized management: + ** The resource-specific *Permissions* tabs (for users, groups, clients, and roles) have been removed. + ** A new *Permissions* section provides centralized management for all administrative permissions from a single place in the Admin Console. +* Explicit operation scoping: + ** Transitive dependencies between permissions have been removed. + ** Administrators must now explicitly assign each required permission. 
+ ** Example: To both view and manage a resource, both *view* and *manage* scopes for a permissions must be assigned separately. +* Permission model changes: + ** The *user-impersonated* user permission has been _removed_. + ** The *configure* client permission has been _removed_. With the introduction of explicit operation scoping in V2, the distinction between manage and configure became ambiguous. + ** The *user-impersonated* user permission has been _removed_. Instead, you can use the `impersonate-members` scope of the `Groups` resource type to allow or deny impersonation of group members. + ** Permissions to `manage-members` of a group do not allow a realm administrator to unassign members from groups. The reason for that is that in V1 this was allowing a member of a group to become + a regular realm user, and workaround permissions to create users in a realm. In the future, we will be working to provide additional scopes to allow deleting members from groups. +* Flexible resource scoping: + ** Unlike V1, where permissions were granted either to *a single resource* (for clients, groups, and roles) or *all resources* (for users), V2 introduces greater flexibility. + ** Administrators can now define permissions for: + *** A *specific resource* + *** A *set of selected resources* + *** *All resources* of a given type + *** This applies to *all resource types*: clients, users,groups, and roles. + +=== LDAP provider now can store new users, groups, and roles in a sub-DN of the base DN + +When adding new users, groups, or roles, the LDAP provider would always store them in the same base DN configured for the searches. However, in some deployments admins may want to configure a broader DN with `subtree` scope to fetch users (or groups/roles) from multiple sub-DNs, but they don't want new users (or groups/roles) to be stored in this base DN in LDAP. Instead, they would like to chose one of the sub-DNs for that. + +It is now possible to control where new users, groups, or roles will be created using the new `Relative User Creation DN` config option in the LDAP provider and also in the LDAP group and role mappers. For more details, check the link:{adminguide_link}#_ldap[LDAP admin guide] + +=== Removal of the `X-XSS-Protection` header + +Because the https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/X-XSS-Protection[`X-XSS-Protection` header] is no longer supported by any user agents that are supported by Keycloak, it has been removed. This header was a feature of Internet Explorer, Chrome, and Safari that stopped pages from loading when they detected reflected cross-site scripting (XSS) attacks. + +We don't expect that this will impact any deployments due to the lack of support in user agents, as well as this feature being supplanted by https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/CSP[Content Security Policy (CSP)]. + +=== JWT client authentication defines a new max expiration option for the token + +When a client is configured to authenticate using the *Signed JWT* or *Signed JWT with Client Secret* type, {project_name} now enforces a maximum expiration for the token. This means that, although the `exp` (expiration) claim in the token may be much later, {project_name} will not accept tokens issued before that max expiration time. The default value is 60 seconds. Note that JWT tokens should be issued right before being sent for authentication. This way, the client has one minute window to send the token for login. 
Nevertheless this expiration can be tuned using the *Max expiration* configuration option in the client *Credentials* tab (see link:{adminguide_link}#_client-credentials[Confidential client credentials in the {adminguide_name}] for more information). + +=== Updates to the `user-profile-commons.ftl` theme template +The `user-profile-commons.ftl` changed to improve support for localization. See https://github.com/keycloak/keycloak/issues/38029. +As a result, and if you are extending this template, pages might start displaying a `locale` field. To avoid that, update +the theme template with the changes aforementioned. diff --git a/docs/documentation/upgrading/topics/changes/changes-26_3_0.adoc b/docs/documentation/upgrading/topics/changes/changes-26_3_0.adoc new file mode 100644 index 000000000000..8fe4666ed4b6 --- /dev/null +++ b/docs/documentation/upgrading/topics/changes/changes-26_3_0.adoc @@ -0,0 +1,227 @@ +== Breaking changes + +Breaking changes are identified as requiring changes from existing users to their configurations. +In minor or patch releases, we will only do breaking changes to fix important bugs. + +=== Reading information about temporarily locked users + +In previous releases there was an inconsistency in the REST endpoint result of getting a user (`+GET /admin/realms/{realm}/users/{user-id}+`) and searching for a user (`+GET /admin/realms/{realm}/users+`). When BruteForce is enabled and a user was temporarily locked out the former endpoint would return `enabled=false` while the latter would return `enabled=true`. If the user was updated and enabled was false due to temporary lockout then the user would be disabled permanently. Both endpoints now return `enabled=true` when a user is temporarily locked out. To check whether a user is temporarily locked out the BruteForceUserResource endpoint should be utilised (`+GET /admin/realms/{realm}/attack-detection/brute-force/users/{userId}+`). + +=== User searches through the User API are now respecting the user profile settings + +When querying users through the User API, the user representation and their attributes are now taking into account the +user profile settings defined for the realm. + +It might happen that attributes in user representations are no longer available depending on the +user profile configuration where too much information was returned in the past. + +== Notable changes + +Notable changes where an internal behavior changed to prevent common misconfigurations, fix bugs or simplify running {project_name}. + +=== Different credentials of a user need to have different names + +When adding an OTP, WebAuthn or any other 2FA credentials, the name the user assigns to this credential needs to be unique for the given user. +This allows the user to distinguish between those credentials, and either update or delete them later. +If a user tries to create a credential with an already existing name, there is an error message and the user is asked to change the name of the new credential. + +=== Restrict admin role mappings to server administrators + +To enhance security, only users with the `admin` role in the `master` realm (server admins) can assign admin roles. This ensures that critical permissions cannot be delegated by realm-level administrators. 
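Referring back to the lockout check mentioned under the breaking changes above, a hedged sketch of querying the brute-force status of a user; the host, realm, user ID, and admin token are placeholders:

[source,bash]
----
# Returns the brute-force detection status for a single user, including whether the account is temporarily locked
curl -s -H "Authorization: Bearer $ADMIN_TOKEN" \
  "https://keycloak.example.com/admin/realms/myrealm/attack-detection/brute-force/users/$USER_ID"
----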
+ +=== Problematic cache configurations ignored + +Previous versions of {project_name} warned about problematic configurations, for example, if a wrong number of owners was configured or a cache size was set when it should not have been set when enabling volatile user sessions. +The docs also stated to update the `cache-ispn.xml` configuration file for volatile user sessions. + +The current version will always use safe settings for the number of owners and maximum cache size for the affected user and client session caches, and will log only an INFO message. +With this behavior, there is no need any more to update the `cache-ispn.xml` configuration file. +If you previously used a custom `cache-ispn.xml` in order to use volatile user sessions, we recommend reverting those changes and use the standard configuration file. + +=== Email verification is now automatically set when using a OpenID Connect broker with `Trust email` is enabled and `Sync Mode` is `FORCE` + +Until now, the OpenID Connect broker did not support the standard `email_verified` claim available from the ID Tokens issued by +OpenID Connect Providers. + +In this release, the broker was updated to respect the value from this claim to set the email verification status for the federated (local) user account. +Whenever users are federated for the first time or re-authenticating, if the `Trust email` setting is enabled and `Sync Mode` is set to `FORCE`, +the user account will be updated to (un)mark the email as verified. +If the provider does not send the claim, it defaults to the original behavior and sets the email as verified. + +In the future, we might evaluate changing this specific configuration to avoid automatic updates on the email verification +status on federated user accounts depending on the use cases and the demand from the community. + +=== Verify existing account by Email is only executed for the email and username sent by the identity provider + +The execution *Verify Existing Account By Email* is one of the alternatives that the *First Login Flow* has to allow a brokering account to be linked to an existing {project_name} user. This step is executed when the user logs in into {project_name} through the broker for the first time, and the identity provider account is already present in {project_name}. The execution sends an email to the current {project_name} address in order to confirm the user controls that account. + +Since this release, the *Verify Existing Account By Email* execution is only attempted in the *First Login Flow* if the linking attributes (email and username) sent by the external identity provider are not modified by the user during the review process. This new behavior avoids sending verification emails to an existing {project_name} account that can inadvertently accept the linking. + +In case the provider needs to modify the information sent by the identity provider (because emails or usernames are different in the broker), only the other alternative *Verify Existing Account By Re-authentication* is available to link the new account to the existing {project_name} user. + +If the data received from the identity provider is mandatory and cannot be modified, then the *Review Profile* step in the *First Login Flow* can be disabled to avoid any user intervention. + +For more information, see link:{adminguide_link}#_identity_broker_first_login[the First login flow section of the {adminguide_name}]. 
+ +=== Signing out from other devices now disabled by default + +Previously, when a user updated their credentials, like changing their password or adding another factor like an OTP or Passkey, they had a checkbox *Sign out from other devices* which was checked by default. Since this release, {project_name} displays the checkbox *Sign out from other devices* not checked by default. This checkbox should now be intentionally enabled by the user to logout all the other related sessions associated to the same user. + +=== Signing out from other devices logs out offline sessions + +Related to the previous point, in previous versions, the *Sign out from other devices* checkbox logged out only regular sessions. +Starting with this release, it logs out also offline sessions as this is what users would expect to happen given the current screen design. + +To revert to the old behavior, enable the deprecated feature `logout-all-sessions:v1`. +This deprecated feature will be removed in a future version. + +=== Updates to the `user-profile-commons.ftl` theme template + +The `user-profile-commons.ftl` changed to improve support for localization. See https://github.com/keycloak/keycloak/issues/38029. +As a result, and if you are extending this template, pages might start displaying a `locale` field. To avoid that, update +the theme template with the changes aforementioned. + +=== Subgroup counts are no longer cached + +When returning subgroups of a group, the count of subgroups of each subgroup of a group is no longer cached. With the +introduction of Fine-Grained Admin Permissions, the result set is filtered at the database level based on any permissions +defined to a realm so that the count will change accordingly to these permissions. + +Instead of caching the count, a query will be executed every time to obtain the expected number of groups an administrator can access. + +Most of the time, this change will not impact clients querying the API to fetch the subgroups of a group. However, if not the case, +a new parameter `subGroupsCount` was introduced to the following endpoints: + +* `+/realms/{realm}/groups/{id}/children+` +* `+/realms/{realm}/groups+` + +With this parameter, clients can decide whether the count should be returned to each individual group returned. To not break existing deployments, +this parameter defaults to `true` so that the count is returned if the parameter is not set. + +=== Upgrade procedure changed for the distribution + +If you are upgrading {project_name} by downloading the distribution, the upgrade procedure has been changed. Previously it recommended copying over the contents from the `conf/` folder from the old to the new installation. +The new procedure recommends to re-apply any changes to `cache-ispn.xml` or a custom cache configuration based on the file included in the new version. + +This prevents accidentally downgrading functionality, for example, by using an old `cache-ispn.xml` file from a previous version. + +=== Default browser flow changes 2FA to include WebAuthn and Recovery Codes + +Previously the default *browser* flow had a *Browser - Conditional OTP* conditional sub-flow that enabled One-Time Password (OTP) as a 2nd Factor Authentication (2FA). Starting with this version, the sub-flow is renamed to *Browser - Conditional 2FA*, the *OTP Form* is _Alternative_, and includes two more 2FA methods: *WebAuthn Authenticator* and *Recovery Authentication Code Form*. 
Both new executions are _Disabled_ by default, but they can be set to _Alternative_ to include them into the flow. + +Upgraded realms will not be changed. The updated flow will only be available for new realms. Take this change into consideration if you have automated the realm creation. + +=== Syslog counting framing now enabled based on protocol + +Syslog messages sent over `tcp` (or `ssl-tcp`) protocol now use counting framing by default, prefixing messages with their size as required by some Syslog servers. + +To change this behavior, use the `--log-syslog-counting-framing` option with one of the following values: `protocol-dependent` (default), `true`, or `false`. + +== Deprecated features + +The following sections provide details on deprecated features. + +=== SPI options separating the provider with a single dash + +SPI options ending in `-enabled`, `-provider-default`, or `-provider` are treated as build-time options. However, in some instances, this was not correct as a provider could have a configuration property ending in one of those suffixes as well. + +To resolve this ambiguity, and any potential ambiguity involving SPI and provider names, a new SPI option format was introduced where the scopes and suffix are separated by `--`(double dash) instead of `-`(dash). The new format then reads as `+spi-----...+`. + +An SPI property ending in `-enabled`, `-provider-default`, or `-provider` should use the new format or else a warning will be emitted. For example `spi-----enabled` will be recognized as a build-time option without a warning. + +For instance, the correct way to reference your custom email template is: `--spi-email-template--mycustomprovider--enabled` (not `--spi-email-template-mycustomprovider-enabled`). + +Options using the legacy format and ending in `-enabled`, `-provider-default`, or `-provider` will still be treated as a build-time option, but may not be in future releases. + +=== Kubernetes cache stack has been deprecated + +The `kubernetes` cache stack has been deprecated and will be removed in a future release. Users should transition to the `jdbc-ping` stack. + +Consequently, the Keycloak Operator now uses the `jdbc-ping` cache stack by default. + +=== Deprecation of `method RequiredActionProvider.getMaxAuthAge()` +The method `RequiredActionProvider.getMaxAuthAge()` is deprecated. It is effectively not used now. Please use the method `RequiredActionProvider.getMaxAuthAge(KeycloakSession session)` instead. This is due to enable individual configuration for required actions. + +=== Deprecation of `spi-connections-infinispan-quarkus-site-name` + +The option `spi-connections-infinispan-quarkus-site-name` is deprecated and no longer used for multi-site setups, and it will be removed in the future. +Use `spi-cache-embedded-default-site-name` instead in setups when running with embedded distributed caches. +See the https://www.keycloak.org/server/all-provider-config[All provider configuration] for more details on these options. + +=== Deprecated proprietary protocol for client initiated linking to the identity provider account + +When you want the user, who is authenticated to your client application, to link his or her account to a specific identity provider, consider using the Application initiated action (AIA) based +mechanism with the action `idp_link`. The proprietary custom protocol for client initiated account linking is deprecated now and might be removed in the future versions. 
For more information, see the +Client initiated account link section of the link:{developerguide_link}[{developerguide_name}]. + +=== Deprecated for removal the Instagram Identity Broker + +In this release, the Instagram Identity Broker is deprecated for removal and is not enabled by default. +If you are using this broker, it is recommended to use the Facebook Identity Broker instead. + +For more details, see +https://github.com/keycloak/keycloak/issues/37967[Deprecate for removal the Instagram social broker]. + +If you are using the Instagram Identity Broker and want to re-enable it, you can do it by enabling the `instagram-broker` +feature using the `features` server option: + +[source] +---- +--features=instagram-broker +---- + +It has been a while since discussions started about any activity around the Instagram Identity Broker +and any objection from the community about deprecating it for removal. For more details, see +https://github.com/keycloak/keycloak/issues/37967[Deprecate for removal the Instagram social broker]. + +=== Local admin deprecated for removal + +`UrlType.LOCAL_ADMIN` and the corresponding welcome theme variable `localAdminUrl` have been deprecated for eventual removal. The default welcome resource will now simply mention localhost rather than providing a URL when an admin user has yet to be created. + +=== Deprecated password policy Recovery Codes Warning Threshold + +In relation to supported Recovery codes, we deprecated the password policy `Recovery Codes Warning Threshold`. This password policy might be removed in the future major version of {project_name}. +This password policy was not related to passwords at all, but was related to recovery codes, and hence using password policy is not appropriate way for the configuration of the threshold. It is +recommended to use the configuration option *Warning Threshold* of the *Recovery Authentication Codes* required action instead of using password policy. For more details, see the link:{adminguide_link}#_recovery-codes[Recovery codes documentation]. + +=== Scope.getPropertyNames deprecated for removal + +The `org.keycloak.Config.Scope.getPropertyNames` method has been deprecated for removal. + +=== Deprecation of the Passkeys Conditional UI Authenticator + +The preview feature *Passkeys* recently introduced a new *Passkeys Conditional UI Authenticator* that you can use to integrate the passkey auto-fill or conditional UI feature in your login flow. Passkeys are now being seamlessly integrated into {project_name} inside the default username forms. Therefore, the old authenticator is invalid and it is deprecated in this release. The factory and implementation classes will be removed when *Passkeys* are supported in {project_name}. + +== Removed features + +The following features have been removed from this release. + +=== Removal of `jboss.site.name` and `jboss.node.name` + +Both system properties have been used internally within Keycloak and have not been part of the official documentation. +{project_name} will fail to start if those are present. + +Instead, use the command line option `spi-cache-embedded-default-site-name` as `jboss.site.name` replacement, and `spi-cache-embedded-default-node-name` as `jboss.node.name` replacement. +See the https://www.keycloak.org/server/all-provider-config[All provider configuration] for more details on these options. + +=== `KeycloakSessionTask.useExistingSession` method removed + +`KeycloakSessionTask.useExistingSession` was only useful to private server logic. 
Now that this logic has been refined, there is no need for this method. + +In previous releases there was a default implementation in the interface returning `false`; we considered it unlikely that it was overridden in implementations. + +=== Usage of remote stores for embedded caches is restricted + +The experimental feature `cache-embedded-remote-store` was removed in this release and usage of remote stores for embedded caches is now restricted. + +Consider one of the following cases and recommended migration steps: + +* If you are using remote stores for running {project_name} in multiple data centers, especially if they do not have a direct networking connection to allow all {project_name} nodes to form a cluster, follow the link:{highavailabilityguide_link}[{highavailabilityguide_name}] for deploying a multi-site {project_name} setup. +* If you are using remote stores to keep user sessions available after a {project_name} restart, use the `persistent-user-session` feature which is enabled by default. + +[WARNING] +==== +* {project_name} refuses to start if the `persistent-user-session` feature is disabled and a remote store is configured for any of the user session caches. + +* With the `persistent-user-session` feature enabled, the remote store configuration is ignored and {project_name} will print a warning. +==== diff --git a/docs/documentation/upgrading/topics/changes/changes-26_3_1.adoc b/docs/documentation/upgrading/topics/changes/changes-26_3_1.adoc new file mode 100644 index 000000000000..884556ac5cd6 --- /dev/null +++ b/docs/documentation/upgrading/topics/changes/changes-26_3_1.adoc @@ -0,0 +1,8 @@ +// ------------------------ Notable changes ------------------------ // +== Notable changes + +Notable changes where an internal behavior changed to prevent common misconfigurations, fix bugs or simplify running {project_name}. + +=== Options for additional datasources marked as preview + +In the 26.3.0 release, the newly added options for configuring additional datasources were missing a preview label. This has now been corrected as work on this feature continues over the next few releases. diff --git a/docs/documentation/upgrading/topics/changes/changes-26_4_0.adoc b/docs/documentation/upgrading/topics/changes/changes-26_4_0.adoc new file mode 100644 index 000000000000..54daaf12cb09 --- /dev/null +++ b/docs/documentation/upgrading/topics/changes/changes-26_4_0.adoc @@ -0,0 +1,74 @@ +// ------------------------ Breaking changes ------------------------ // +== Breaking changes + +Breaking changes are identified as requiring changes from existing users to their configurations. +In minor or patch releases we will only do breaking changes to fix bugs. + +=== + +// ------------------------ Notable changes ------------------------ // +== Notable changes + +Notable changes where an internal behavior changed to prevent common misconfigurations, fix bugs or simplify running {project_name}. + +=== Usage of the `exact` request parameter when searching users by attributes + +If you are querying users by attributes through the User API where you want to fetch users that match a specific attribute key (regardless of the value), +you should consider setting the `exact` request parameter to `false` when invoking the `+/admin/realms/{realm}/users+` endpoint using +the `GET` method.
+ +For instance, searching for all users with the attribute `myattribute` set should be done as follows: + +[source] +---- +GET /admin/realms/{realm}/users?exact=false&q=myattribute: +---- + +The {project_name} Admin Client is also updated with a new method to search users by attribute using the `exact` request parameter. + +=== Automatic database connection properties for the PostgreSQL driver + +When running PostgreSQL reader and writer instances, {project_name} needs to always connect to the writer instance to do its work. + +Starting with this release, and when using the original PostgreSQL driver, {project_name} sets the `targetServerType` property of the PostgreSQL JDBC driver to `primary` to ensure that it always connects to a writable primary instance and never connects to a secondary reader instance in failover or switchover scenarios. + +You can override this behavior by setting your own value for `targetServerType` in the DB URL or additional properties. + +=== JGroups system properties replaced with CLI options + +Until now, it was necessary to configure JGroups network addresses and ports using the `+jgroups.bind.*+` and `+jgroups.external_*+` +system properties. In this release we have introduced the following CLI options to allow these addresses and ports to be +configured directly via {project_name}: `cache-embedded-network-bind-address`, `cache-embedded-network-bind-port`, +`cache-embedded-network-external-address`, `cache-embedded-network-external-port`. Configuring ports using the old +properties will still function as before, but we recommend changing to the CLI options as this may change in the future. + +=== Volatile user sessions affecting offline session memory requirements + +Starting with this release, {project_name} will cache by default only 10000 entries for offline user and client sessions in memory when volatile user sessions are enabled. This will greatly reduce memory usage. + +Use the options `cache-embedded-offline-sessions-max-count` and `cache-embedded-offline-client-sessions-max-count` to change the size of the offline session caches. + +// ------------------------ Deprecated features ------------------------ // +== Deprecated features + +The following sections provide details on deprecated features. + +=== Deprecated `displayTest` field in `ConsentScopeRepresentation` + +The `displayTest` field in the `ConsentScopeRepresentation` class returned by the Account REST service has been deprecated due to a typo in its name. +A new field `displayText` with the correct spelling has been added to replace it. The old field will be removed in {project_name} 27.0. +The TypeScript type `ConsentScopeRepresentation` for the Account Console already contains only the new field. + +=== Lifetime of offline session caches + +The options `--spi-user-sessions--infinispan--offline-session-cache-entry-lifespan-override` and `--spi-user-sessions--infinispan--offline-client-session-cache-entry-lifespan-override` are now deprecated for removal. + +Instead, use the options `cache-embedded-offline-sessions-max-count` and `cache-embedded-offline-client-sessions-max-count` to limit the memory usage if the default of 10000 cached offline user and client sessions does not work in your scenario. + +// ------------------------ Removed features ------------------------ // +== Removed features + +The following features have been removed from this release.
+ +=== + diff --git a/docs/documentation/upgrading/topics/changes/changes-template.adoc b/docs/documentation/upgrading/topics/changes/changes-template.adoc new file mode 100644 index 000000000000..e75527c9e376 --- /dev/null +++ b/docs/documentation/upgrading/topics/changes/changes-template.adoc @@ -0,0 +1,29 @@ +// ------------------------ Breaking changes ------------------------ // +== Breaking changes + +Breaking changes are identified as requiring changes from existing users to their configurations. +In minor or patch releases we will only do breaking changes to fix bugs. + +=== + +// ------------------------ Notable changes ------------------------ // +== Notable changes + +Notable changes where an internal behavior changed to prevent common misconfigurations, fix bugs or simplify running {project_name}. + +=== + +// ------------------------ Deprecated features ------------------------ // +== Deprecated features + +The following sections provide details on deprecated features. + +=== + +// ------------------------ Removed features ------------------------ // +== Removed features + +The following features have been removed from this release. + +=== + diff --git a/docs/documentation/upgrading/topics/changes/changes.adoc b/docs/documentation/upgrading/topics/changes/changes.adoc index a64dc4ba60d0..ea7f6ba5d3c3 100644 --- a/docs/documentation/upgrading/topics/changes/changes.adoc +++ b/docs/documentation/upgrading/topics/changes/changes.adoc @@ -1,6 +1,22 @@ [[migration-changes]] == Migration Changes +=== Migrating to 26.4.0 + +include::changes-26_4_0.adoc[leveloffset=2] + +=== Migrating to 26.3.0 + +include::changes-26_3_0.adoc[leveloffset=2] + +=== Migrating to 26.2.0 + +include::changes-26_2_0.adoc[leveloffset=2] + +=== Migrating to 26.1.3 + +include::changes-26_1_3.adoc[leveloffset=2] + === Migrating to 26.1.0 include::changes-26_1_0.adoc[leveloffset=2] diff --git a/docs/documentation/upgrading/topics/download.adoc b/docs/documentation/upgrading/topics/download.adoc index 0910855f2293..80ec3b1dfcde 100644 --- a/docs/documentation/upgrading/topics/download.adoc +++ b/docs/documentation/upgrading/topics/download.adoc @@ -11,4 +11,8 @@ from the {project_name} website. + After extracting this file, you should have a directory that is named `{archivebasename}-{project_version}`. . Move this directory to the desired location. -. Copy `conf/`, `providers/` and `themes/` from the previous installation to the new installation. \ No newline at end of file +. Copy `providers/` and `themes/` from the previous installation to the new installation. +. Copy all files except `cache-ispn.xml` from the `conf/` directory of the previous installation to the new installation (see the command sketch after this procedure). +. If you modified `cache-ispn.xml` or created a custom cache configuration file: +.. Re-apply your changes based on the `cache-ispn.xml` file shipped with the new installation, and place them in the new installation. +.. Review the latest {project_name} configuration options for cache sizes and transport stacks to see if they can be used instead of your modifications, as they provide better documentation, additional validations and functionality, and a simpler upgrade experience.
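+A minimal command-line sketch of the copy steps above, assuming the previous and new installations were extracted to the hypothetical directories `keycloak-old` and `keycloak-new` (adjust both paths to your environment): + +[source,bash] +---- +# Copy providers and themes from the previous installation +cp -r keycloak-old/providers/. keycloak-new/providers/ +cp -r keycloak-old/themes/. keycloak-new/themes/ + +# Copy the configuration, but keep the cache-ispn.xml shipped with the new installation +rsync -a --exclude 'cache-ispn.xml' keycloak-old/conf/ keycloak-new/conf/ +----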
diff --git a/docs/documentation/upgrading/topics/migrate_db.adoc b/docs/documentation/upgrading/topics/migrate_db.adoc index 4e65d2e6cb40..1b5c342eb614 100644 --- a/docs/documentation/upgrading/topics/migrate_db.adoc +++ b/docs/documentation/upgrading/topics/migrate_db.adoc @@ -5,17 +5,27 @@ {project_name} can automatically migrate the database schema, or you can choose to do it manually. By default the database is automatically migrated when you start the new installation for the first time. +[NOTE] +==== +Before you migrate the database, shut down all {project_name} nodes running the old version of {project_name}. +==== + +[NOTE] +==== +Migration is not supported with the default H2-based `dev-file` database type. +==== + === Automatic relational database migration To perform an automatic migration, start the server connected to the desired database. If the database schema has changed for the new server version, the migration starts automatically unless the database has too many records. For example, creating an index on tables with millions of records can be time-consuming and cause a major service disruption. Therefore, a threshold of `300000` records exists for automatic migration. If the number of records exceeds this threshold, the index is not created. Instead, you find a warning in the server logs with the SQL commands that you can apply manually. -To change the threshold, set the `index-creation-threshold` property, value for the default `connections-liquibase` provider: +To change the threshold, set the `index-creation-threshold` property for the `connections-liquibase` provider: [source,bash] ---- -kc.[sh|bat] start --spi-connections-liquibase-default-index-creation-threshold=300000 +kc.[sh|bat] start --spi-connections-liquibase--quarkus--index-creation-threshold=300000 ---- === Manual relational database migration @@ -25,18 +35,17 @@ default `connections-jpa` provider: [source,bash] ---- -kc.[sh|bat] start --spi-connections-jpa-quarkus-migration-strategy=manual +kc.[sh|bat] start --spi-connections-jpa--quarkus--migration-strategy=manual ---- -When you start the server with this configuration, the server checks if the database needs to be migrated. -The required changes are written to the `bin/keycloak-database-update.sql` SQL file that you can review and manually run against the database. +When you start the server with this configuration, the server checks if the database needs to be migrated. If migration is needed, the required changes are written to the `bin/keycloak-database-update.sql` SQL file. You can review and manually run these commands against the database. To change the path and name of the exported SQL file, set the `migration-export` property for the default `connections-jpa` provider: [source,bash] ---- -kc.[sh|bat] start --spi-connections-jpa-quarkus-migration-export=/ +kc.[sh|bat] start --spi-connections-jpa--quarkus--migration-export=/ ---- For further details on how to apply this file to the database, see the documentation for your relational database. diff --git a/docs/documentation/upgrading/topics/migrate_themes.adoc b/docs/documentation/upgrading/topics/migrate_themes.adoc index 869130cf83d6..a700d145e3d0 100644 --- a/docs/documentation/upgrading/topics/migrate_themes.adoc +++ b/docs/documentation/upgrading/topics/migrate_themes.adoc @@ -14,9 +14,7 @@ If you created custom themes, those themes must be migrated to the new server.
A === Migrating templates -If you customized any template, review the new version to decide about updating your customized template. If you made minor changes, you could compare the updated template to your customized template. However, if you made many changes, consider comparing the new template to your customized template. This comparison will show you what changes you need to make. - -You can use a diff tool to compare the templates. The following screenshot compares the `info.ftl` template from the Login theme and an example custom theme: +If you customized any template, compare it to the new version of that template. This comparison shows you what changes you need to apply to your customized template. You can use a diff tool to compare the templates. The following screenshot compares the `info.ftl` template from the Login theme and an example custom theme: .Updated version of a Login theme template versus a custom Login theme template image:images/theme-migration-meld-info-1.png[Updated version of a Login theme template versus a custom Login theme template] diff --git a/docs/documentation/upgrading/topics/upgrade_adapters.adoc b/docs/documentation/upgrading/topics/upgrade_adapters.adoc index 0a82495c3314..eb1b582bbf11 100644 --- a/docs/documentation/upgrading/topics/upgrade_adapters.adoc +++ b/docs/documentation/upgrading/topics/upgrade_adapters.adoc @@ -28,20 +28,24 @@ To upgrade the {appserver_name} adapter, complete the following steps: [[_upgrade_js_adapter]] == Upgrading the JavaScript adapter - -To upgrade a JavaScript adapter, install the latest version https://www.npmjs.com/package/keycloak-js[from NPM]. +ifeval::[{project_community}==true] +To upgrade the JavaScript adapter, install the latest version https://www.npmjs.com/package/keycloak-js[from NPM]. .Procedure . `npm install keycloak-js@latest` +endif::[] +ifeval::[{project_product}==true] +To upgrade the JavaScript adapter, install the latest version {securing_apps_link}/#javascript-adapter-installation[from NPM]. +endif::[] [[_upgrade_nodejs_adapter]] == Upgrading the `Node.js` adapter -To upgrade a `Node.js` adapter that has been copied to your web application, perform the following procedure. +ifeval::[{project_community}==true] +To upgrade the `Node.js` adapter, see the {securing_apps_base_link}/nodejs-adapter#_upgrade_nodejs_adapter[`Node.js adapter` documentation]. +endif::[] -.Procedure -. Download the new adapter archive. -. Remove the existing `Node.js` adapter directory -. Unzip the updated file into its place -. Change the dependency for keycloak-connect in the `package.json` of your application +ifeval::[{project_product}==true] +To upgrade the `Node.js` adapter, see the {securing_apps_link}/nodejs-adapter-_upgrade_nodejs_adapter[`Node.js adapter` documentation]. 
+endif::[] diff --git a/docs/guides/assembly.xml b/docs/guides/assembly.xml index 48a321c81a31..e37b98089fa9 100644 --- a/docs/guides/assembly.xml +++ b/docs/guides/assembly.xml @@ -7,49 +7,49 @@ ${project.build.directory} - / + generated-guides/** ${project.basedir}/server - /generated-guides/server/ + generated-guides/server/ pinned-guides ${project.basedir}/getting-started - /generated-guides/getting-started/ + generated-guides/getting-started/ pinned-guides ${project.basedir}/operator - /generated-guides/operator/ + generated-guides/operator/ pinned-guides ${project.basedir}/observability - /generated-guides/observability/ + generated-guides/observability/ pinned-guides ${project.basedir}/high-availability - /generated-guides/high-availability/ + generated-guides/high-availability/ pinned-guides ${project.basedir}/securing-apps - /generated-guides/securing-apps/ + generated-guides/securing-apps/ pinned-guides diff --git a/docs/guides/attributes.adoc b/docs/guides/attributes.adoc index 3382ff849d7f..b2d3f85f9ab5 100644 --- a/docs/guides/attributes.adoc +++ b/docs/guides/attributes.adoc @@ -29,5 +29,6 @@ :quickstartRepo_name: Keycloak Quickstarts Repository :quickstartRepo_dir: keycloak-quickstarts :securing_apps_link: https://www.keycloak.org/guides#securing-apps +:upgrading_guide_link: {project_doc_base_url}/upgrading/ :kc_js_path: /js :kc_realms_path: /realms diff --git a/docs/guides/getting-started/getting-started-docker.adoc b/docs/guides/getting-started/getting-started-docker.adoc index dae6b69bcfe2..89040aafc3f7 100644 --- a/docs/guides/getting-started/getting-started-docker.adoc +++ b/docs/guides/getting-started/getting-started-docker.adoc @@ -2,7 +2,7 @@ <@tmpl.guide title="Docker" -summary="Get started with {project_name} on Docker"> +summary="Get started with {project_name} on Docker."> :containerCommand: docker diff --git a/docs/guides/getting-started/getting-started-kube.adoc b/docs/guides/getting-started/getting-started-kube.adoc index eefe75d2ce86..727de6f402b2 100644 --- a/docs/guides/getting-started/getting-started-kube.adoc +++ b/docs/guides/getting-started/getting-started-kube.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Kubernetes" -summary="Get started with {project_name} on Kubernetes"> +summary="Get started with {project_name} on Kubernetes."> :links-admin-console: {project_name} Admin Console :links-account-console: {project_name} Account Console @@ -32,7 +32,7 @@ minikube addons enable ingress The {project_name} QuickStarts repository includes some example files to help deploy {project_name} to Kubernetes. 
-As a first step, create the {project_name} deployment and service by entering the following command: +As a first step, create the {project_name} StatefulSet and service by entering the following command: [source,bash,subs="attributes+"] ---- diff --git a/docs/guides/getting-started/getting-started-openshift.adoc b/docs/guides/getting-started/getting-started-openshift.adoc index b7e2540d430a..122346ce28e4 100644 --- a/docs/guides/getting-started/getting-started-openshift.adoc +++ b/docs/guides/getting-started/getting-started-openshift.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="OpenShift" -summary="Get started with {project_name} on OpenShift"> +summary="Get started with {project_name} on OpenShift."> :links-admin-console: {project_name} Admin Console :links-account-console: {project_name} Account Console diff --git a/docs/guides/getting-started/getting-started-podman.adoc b/docs/guides/getting-started/getting-started-podman.adoc index 51d15660759e..305756aae6d5 100644 --- a/docs/guides/getting-started/getting-started-podman.adoc +++ b/docs/guides/getting-started/getting-started-podman.adoc @@ -2,7 +2,7 @@ <@tmpl.guide title="Podman" -summary="Get started with {project_name} on Podman"> +summary="Get started with {project_name} on Podman."> :containerCommand: podman diff --git a/docs/guides/getting-started/getting-started-scaling-and-tuning.adoc b/docs/guides/getting-started/getting-started-scaling-and-tuning.adoc index 84001d47ad9d..9775dee8e8b7 100644 --- a/docs/guides/getting-started/getting-started-scaling-and-tuning.adoc +++ b/docs/guides/getting-started/getting-started-scaling-and-tuning.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Scaling" -summary="Get started with {project_name} scaling and tuning"> +summary="Scale and tune your {project_name} installation."> After starting {project_name}, consider adapting your instance to the required load using these scaling and tuning guidelines: @@ -47,7 +47,7 @@ If you are not using the Operator, please review the following: - Your instances need a way to discover each other. For more information, see discovery in <@links.server id="caching" />. - This cache is not optimal for clusters that span multiple availability zones, which are also called stretch clusters. For embedded Infinispan cache, work to have all instances in one availability zone. The goal is to avoid unnecessary round-trips in the communication that would amplify in the response times. On Kubernetes, use Pod affinity to enforce this grouping of Pods. -- This cache does not gracefully handle multiple members joining or leaving concurrently. In particular, members leaving at the same time can lead to data loss. On Kubernetes, you can use a StatefulSet with the default serial handling to ensure Pods are started and stopped sequentially. +- This cache does not gracefully handle multiple members joining or leaving concurrently. In particular, members leaving at the same time can lead to data loss. On Kubernetes, use a StatefulSet with the default serial handling to ensure Pods are started and stopped sequentially; using a Deployment is not supported or recommended. A quick way to verify the default serial handling is sketched below. To avoid losing service availability when a whole site is unavailable, see the high availability guide for more information on a multi-site deployment. See <@links.ha id="introduction" />.
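+As a quick sanity check (a sketch only, assuming the StatefulSet created for {project_name} is named `keycloak` and runs in the `keycloak` namespace; adjust both to your environment), you can confirm that the default serial Pod management is in effect: + +[source,bash] +---- +# Prints the Pod management policy of the StatefulSet; the default "OrderedReady" starts and stops Pods one at a time +kubectl -n keycloak get statefulset keycloak -o jsonpath='{.spec.podManagementPolicy}' +----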
@@ -58,6 +58,33 @@ Horizontal autoscaling allows for adding or removing {project_name} instances on When using the embedded Infinispan cache cluster, dynamically adding or removing cluster members requires Infinispan to perform a rebalancing of the Infinispan caches, which can get expensive if many entries exist in those caches. To minimize this time we limit number of entries in session related caches to 10000 by default. Note, this optimization is possible only if `persistent-user-sessions` feature is not explicitly disabled in your configuration. -On Kubernetes, the Keycloak custom resource is scalable meaning that it can be targeted by the https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/[built-in autoscaler]. +On Kubernetes, the Keycloak custom resource is scalable, meaning that it can be targeted by the https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/[built-in autoscaler]. For example, to scale on average CPU utilization: + +[source,yaml] +---- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: keycloak-hpa + namespace: keycloak-cluster +spec: + scaleTargetRef: + apiVersion: k8s.keycloak.org/v2alpha1 + kind: Keycloak + name: keycloak + minReplicas: 2 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 +---- + +NOTE: Scaling on memory is generally not needed with persistent sessions enabled, and should not be needed at all when using remote {jdgserver_name}. If you are using persistent sessions or remote {jdgserver_name} and you experience memory issues, it is best to fully diagnose the problem and revisit the <@links.ha id="concepts-memory-and-cpu-sizing" /> guide. Adjusting the memory request and limit is preferable to horizontal scaling. + +Consult the https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/[Kubernetes docs] for additional information, including the usage of https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics[custom metrics]. diff --git a/docs/guides/getting-started/getting-started-zip.adoc b/docs/guides/getting-started/getting-started-zip.adoc index efd8fa43b71b..b40e610205a8 100644 --- a/docs/guides/getting-started/getting-started-zip.adoc +++ b/docs/guides/getting-started/getting-started-zip.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="OpenJDK" -summary="Get started with {project_name} on bare metal"> +summary="Get started with {project_name} on a physical or virtual server."> :links-local: true :links-admin-console: http://localhost:8080/admin[{project_name} Admin Console] diff --git a/docs/guides/getting-started/templates/first-app.adoc b/docs/guides/getting-started/templates/first-app.adoc index 17ea9b3fc1cb..eb585b0aaa0d 100644 --- a/docs/guides/getting-started/templates/first-app.adoc +++ b/docs/guides/getting-started/templates/first-app.adoc @@ -3,14 +3,15 @@ To secure the first application, you start by registering the application with your {project_name} instance: . Open the {links-admin-console}. -. Click the word *master* in the top-left corner, then click *myrealm*. +. Click *myrealm* next to *Current realm*. . Click *Clients*. . Click *Create client* . Fill in the form with the following values: ** *Client type*: `OpenID Connect` ** *Client ID*: `myclient` + -image::add-client-1.png[Add Client] +.Add client +image::add-client-1.png[Add client] . Click *Next* . Confirm that *Standard flow* is enabled. . 
Click *Next*. @@ -19,7 +20,8 @@ image::add-client-1.png[Add Client] * Set *Web origins* to `+https://www.keycloak.org+` . Click *Save*. -image::add-client-2.png[Update Client] +.Update client +image::add-client-2.png[Update client] To confirm the client was created successfully, you can use the SPA testing application on the https://www.keycloak.org/app/[Keycloak website]. diff --git a/docs/guides/getting-started/templates/realm-config.adoc b/docs/guides/getting-started/templates/realm-config.adoc index 13544d0bbc05..fd0b5cc65201 100644 --- a/docs/guides/getting-started/templates/realm-config.adoc +++ b/docs/guides/getting-started/templates/realm-config.adoc @@ -11,17 +11,18 @@ includes a single realm, called `master`. Use this realm only for managing {proj Use these steps to create the first realm. . Open the {links-admin-console}. -. Click *{project_name}* next to *master realm*, then click *Create Realm*. +. Click *Create Realm* next to *Current realm*. . Enter `myrealm` in the *Realm name* field. . Click *Create*. +.Add realm image::add-realm.png[Add realm] == Create a user Initially, the realm has no users. Use these steps to create a user: -. Verify that you are still in the *myrealm* realm, which is shown above the word *Manage*. +. Verify that you are still in the *myrealm* realm, which is next to *Current realm*. . Click *Users* in the left-hand menu. . Click *Create new user*. . Fill in the form with the following values: @@ -30,6 +31,7 @@ Initially, the realm has no users. Use these steps to create a user: ** *Last name*: any last name . Click *Create*. +.Create user image::add-user.png[Create user] This user needs a password to log in. To set the initial password: @@ -38,4 +40,5 @@ This user needs a password to log in. To set the initial password: . Fill in the *Set password* form with a password. . Toggle *Temporary* to *Off* so that the user does not need to update this password at the first login. +.Set password image::set-password.png[Set password] diff --git a/docs/guides/getting-started/templates/start-keycloak-container.adoc b/docs/guides/getting-started/templates/start-keycloak-container.adoc index f66392974d0d..87c32f39b72e 100644 --- a/docs/guides/getting-started/templates/start-keycloak-container.adoc +++ b/docs/guides/getting-started/templates/start-keycloak-container.adoc @@ -4,7 +4,7 @@ From a terminal, enter the following command to start {project_name}: [source,bash,subs="attributes+"] ---- -{containerCommand} run -p 8080:8080 -e KC_BOOTSTRAP_ADMIN_USERNAME=admin -e KC_BOOTSTRAP_ADMIN_PASSWORD=admin quay.io/keycloak/keycloak:{version} start-dev +{containerCommand} run -p 127.0.0.1:8080:8080 -e KC_BOOTSTRAP_ADMIN_USERNAME=admin -e KC_BOOTSTRAP_ADMIN_PASSWORD=admin quay.io/keycloak/keycloak:{version} start-dev ---- This command starts {project_name} exposed on the local port 8080 and creates an initial admin user with the username `admin` diff --git a/docs/guides/high-availability/bblocks-multi-site.adoc b/docs/guides/high-availability/bblocks-multi-site.adoc index 6593b98ef962..6d4375e6626b 100644 --- a/docs/guides/high-availability/bblocks-multi-site.adoc +++ b/docs/guides/high-availability/bblocks-multi-site.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Building blocks multi-site deployments" -summary="Overview of building blocks, alternatives and not considered options" > +summary="Learn about building blocks and suggested setups for multi-site deployments." > The following building blocks are needed to set up a multi-site deployment with synchronous replication. 
diff --git a/docs/guides/high-availability/concepts-database-connections.adoc b/docs/guides/high-availability/concepts-database-connections.adoc index afb4aee788bf..e9108ee486ba 100644 --- a/docs/guides/high-availability/concepts-database-connections.adoc +++ b/docs/guides/high-availability/concepts-database-connections.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Concepts for database connection pools" -summary="Understand these concepts to avoid resource exhaustion and congestion" +summary="Understand concepts for avoiding resource exhaustion and congestion." tileVisible="false" > This section is intended when you want to understand considerations and best practices on how to configure database connection pools for {project_name}. diff --git a/docs/guides/high-availability/concepts-memory-and-cpu-sizing.adoc b/docs/guides/high-availability/concepts-memory-and-cpu-sizing.adoc index 9708ecc689f6..a897ddc5d80c 100644 --- a/docs/guides/high-availability/concepts-memory-and-cpu-sizing.adoc +++ b/docs/guides/high-availability/concepts-memory-and-cpu-sizing.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Concepts for sizing CPU and memory resources" -summary="Understand these concepts to avoid resource exhaustion and congestion" +summary="Understand concepts for avoiding resource exhaustion and congestion." tileVisible="false" > Use this as a starting point to size a product environment. @@ -150,7 +150,7 @@ The benefit of this setup is that the number of Pods does not need to scale duri The following setup was used to retrieve the settings above to run tests of about 10 minutes for different scenarios: -* OpenShift 4.16.x deployed on AWS via ROSA. +* OpenShift 4.17.x deployed on AWS via ROSA. * Machine pool with `c7g.2xlarge` instances.^*^ * {project_name} deployed with the Operator and 3 pods in a high-availability setup with two sites in active/active mode. * OpenShift's reverse proxy runs in the passthrough mode where the TLS connection of the client is terminated at the Pod. diff --git a/docs/guides/high-availability/concepts-multi-site.adoc b/docs/guides/high-availability/concepts-multi-site.adoc index d2f463a79ba3..946650d542ef 100644 --- a/docs/guides/high-availability/concepts-multi-site.adoc +++ b/docs/guides/high-availability/concepts-multi-site.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Concepts for multi-site deployments" -summary="Understanding a multi-site deployment with synchronous replication" > +summary="Understand multi-site deployment with synchronous replication." > This topic describes a highly available multi-site setup and the behavior to expect. It outlines the requirements of the high availability architecture and describes the benefits and tradeoffs. diff --git a/docs/guides/high-availability/concepts-threads.adoc b/docs/guides/high-availability/concepts-threads.adoc index baaac0932c55..f7c8800e833f 100644 --- a/docs/guides/high-availability/concepts-threads.adoc +++ b/docs/guides/high-availability/concepts-threads.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Concepts for configuring thread pools" -summary="Understand these concepts to avoid resource exhaustion and congestion" +summary="Understand concepts for avoiding resource exhaustion and congestion." tileVisible="false" > @@ -17,7 +17,7 @@ For a configuration where this is applied, visit <@links.ha id="deploy-keycloak- // remove this paragraph once OpenJDK 17 is no longer supported on the server side. 
// https://github.com/keycloak/keycloak/issues/31101 -JGroups communications, which is used in single-site setups for the communication between {project_name} nodes, benefits from the use of virtual threads which are available in OpenJDK 21. +JGroups communication, which is used in single-site setups for the communication between {project_name} nodes, benefits from the use of virtual threads, which are available in OpenJDK 21 when at least two cores are available for {project_name}. This reduces the memory usage and removes the need to configure thread pool sizes. Therefore, the use of OpenJDK 21 is recommended. diff --git a/docs/guides/high-availability/deploy-aurora-multi-az.adoc b/docs/guides/high-availability/deploy-aurora-multi-az.adoc index ce6545bc412d..e1b782959543 100644 --- a/docs/guides/high-availability/deploy-aurora-multi-az.adoc +++ b/docs/guides/high-availability/deploy-aurora-multi-az.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Deploy AWS Aurora in multiple availability zones" -summary="Building block for a database" +title="Deploying AWS Aurora in multiple availability zones" +summary="Deploy AWS Aurora as the database building block in a multi-site deployment." tileVisible="false" > This topic describes how to deploy an Aurora regional deployment of a PostgreSQL instance across multiple availability zones to tolerate one or more availability zone failures in a given AWS region. diff --git a/docs/guides/high-availability/deploy-aws-accelerator-fencing-lambda.adoc b/docs/guides/high-availability/deploy-aws-accelerator-fencing-lambda.adoc index aa2d75ea2f83..2b2468ae553d 100644 --- a/docs/guides/high-availability/deploy-aws-accelerator-fencing-lambda.adoc +++ b/docs/guides/high-availability/deploy-aws-accelerator-fencing-lambda.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Deploy an AWS Lambda to disable a non-responding site" -summary="Building block for load balancer resilience" +title="Deploying an AWS Lambda to disable a non-responding site" +summary="Deploy an AWS Lambda as part of the load-balancer building block in a multi-site deployment." tileVisible="false" > This {section} explains how to resolve split-brain scenarios between two sites in a multi-site deployment. diff --git a/docs/guides/high-availability/deploy-aws-accelerator-loadbalancer.adoc b/docs/guides/high-availability/deploy-aws-accelerator-loadbalancer.adoc index 8a6de2269155..41a2539109d9 100644 --- a/docs/guides/high-availability/deploy-aws-accelerator-loadbalancer.adoc +++ b/docs/guides/high-availability/deploy-aws-accelerator-loadbalancer.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Deploy an AWS Global Accelerator load balancer" -summary="Building block for a load balancer" +title="Deploying an AWS Global Accelerator load balancer" +summary="Deploy an AWS Global Accelerator as the load-balancer building block in a multi-site deployment." tileVisible="false" > This topic describes the procedure required to deploy an AWS Global Accelerator to route traffic between multi-site {project_name} deployments.
diff --git a/docs/guides/high-availability/deploy-infinispan-kubernetes-crossdc.adoc b/docs/guides/high-availability/deploy-infinispan-kubernetes-crossdc.adoc index 5994c5c2a0d1..15cf8f546631 100644 --- a/docs/guides/high-availability/deploy-infinispan-kubernetes-crossdc.adoc +++ b/docs/guides/high-availability/deploy-infinispan-kubernetes-crossdc.adoc @@ -3,8 +3,8 @@ <#import "/templates/profile.adoc" as profile> <@tmpl.guide -title="Deploy {jdgserver_name} for HA with the {jdgserver_name} Operator" -summary="Building block for an {jdgserver_name} deployment on Kubernetes" +title="Deploying {jdgserver_name} for HA with the {jdgserver_name} Operator" +summary="Deploy {jdgserver_name} for high availability across multiple availability zones on Kubernetes." tileVisible="false" includedOptions="cache-remote-*"> @@ -304,9 +304,6 @@ include::examples/generated/keycloak-ispn.yaml[tag=keycloak-ispn] This is optional and it defaults to `11222`. <3> The Secret `name` and `key` with the {jdgserver_name} username credential. <4> The Secret `name` and `key` with the {jdgserver_name} password credential. -<5> The `spi-connections-infinispan-quarkus-site-name` is an arbitrary {jdgserver_name} site name which {project_name} needs for its Infinispan caches deployment when a remote store is used. -This site-name is related only to the Infinispan caches and does not need to match any value from the external {jdgserver_name} deployment. -If you are using multiple sites for {project_name} in a cross-DC setup such as <@links.ha id="deploy-infinispan-kubernetes-crossdc" />, the site name must be different in each site. === Architecture diff --git a/docs/guides/high-availability/deploy-keycloak-kubernetes.adoc b/docs/guides/high-availability/deploy-keycloak-kubernetes.adoc index acae4135eafa..28b493175a4b 100644 --- a/docs/guides/high-availability/deploy-keycloak-kubernetes.adoc +++ b/docs/guides/high-availability/deploy-keycloak-kubernetes.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Deploy {project_name} for HA with the {project_name} Operator" -summary="Building block for a {project_name} deployment" +title="Deploying {project_name} for HA with the Operator" +summary="Deploy {project_name} for high availability with the {project_name} Operator as a building block." tileVisible="false" > This guide describes advanced {project_name} configurations for Kubernetes which are load tested and will recover from single Pod failures. @@ -43,10 +43,7 @@ As most requests will not touch the database due to the {project_name} embedded See the <@links.ha id="concepts-database-connections" /> {section} for details. <2> Specify the URL to your custom {project_name} image. If your image is optimized, set the `startOptimized` flag to `true`. <3> Enable additional features for multi-site support like the loadbalancer probe `/lb-check`. -<4> XA transactions are not supported by the https://github.com/awslabs/aws-advanced-jdbc-wrapper/releases/[Amazon Web Services JDBC Driver]. -<5> To be able to analyze the system under load, enable the metrics endpoint. -The disadvantage of the setting is that the metrics will be available at the external {project_name} endpoint, so you must add a filter so that the endpoint is not available from the outside. -Use a reverse proxy in front of {project_name} to filter out those URLs. +<4> To be able to analyze the system under load, enable the metrics endpoint (a quick check is sketched below).
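+As a quick check that metrics are being served (a sketch only, assuming a Pod named `keycloak-0` in the `keycloak` namespace and the default management port `9000`; adjust these to your deployment): + +[source,bash] +---- +# Forward the management port of one Pod and fetch the Prometheus metrics +kubectl -n keycloak port-forward pod/keycloak-0 9000:9000 & +curl -sk https://localhost:9000/metrics | head +----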
== Verifying the deployment diff --git a/docs/guides/high-availability/examples/generated/ispn-single.yaml b/docs/guides/high-availability/examples/generated/ispn-single.yaml index a820756add4e..cd68883c4c7c 100644 --- a/docs/guides/high-availability/examples/generated/ispn-single.yaml +++ b/docs/guides/high-availability/examples/generated/ispn-single.yaml @@ -24,6 +24,12 @@ data: metrics: namesAsTags: true histograms: false + tracing: + enabled: true + collector-endpoint: "http://tempo-tempo.monitoring.svc:4318" + exporter-protocol: "OTLP" + service-name: "infinispan-server" + security: false server: endpoints: - securityRealm: default @@ -305,12 +311,12 @@ spec: expose: type: Route configMapName: "cluster-config" - image: quay.io/infinispan/server:15.0.11.Final - version: 15.0.4 + image: quay.io/infinispan/server:15.0.15.Final + version: 15.0.15 configListener: enabled: false container: - extraJvmOpts: '-Dorg.infinispan.openssl=false -Dinfinispan.cluster.name=ISPN -Djgroups.xsite.fd.interval=2000 -Djgroups.xsite.fd.timeout=15000' + extraJvmOpts: '-Dorg.infinispan.openssl=false -Dorg.infinispan.threads.virtual=true -Dinfinispan.cluster.name=ISPN -Djgroups.xsite.fd.interval=2000 -Djgroups.xsite.fd.timeout=15000 -Dotel.traces.sampler=parentbased_always_off' cpu: 4:2 memory: 2Gi:1Gi logging: diff --git a/docs/guides/high-availability/examples/generated/ispn-site-a.yaml b/docs/guides/high-availability/examples/generated/ispn-site-a.yaml index fa331d38a824..7951e9cd2101 100644 --- a/docs/guides/high-availability/examples/generated/ispn-site-a.yaml +++ b/docs/guides/high-availability/examples/generated/ispn-site-a.yaml @@ -36,6 +36,12 @@ data: metrics: namesAsTags: true histograms: false + tracing: + enabled: true + collector-endpoint: "http://tempo-tempo.monitoring.svc:4318" + exporter-protocol: "OTLP" + service-name: "infinispan-server" + security: false server: endpoints: - securityRealm: default @@ -339,12 +345,12 @@ spec: expose: type: Route configMapName: "cluster-config" - image: quay.io/infinispan/server:15.0.11.Final - version: 15.0.4 + image: + version: 15.0.15 configListener: enabled: false container: - extraJvmOpts: '-Dorg.infinispan.openssl=false -Dinfinispan.cluster.name=ISPN -Djgroups.xsite.fd.interval=2000 -Djgroups.xsite.fd.timeout=10000' + extraJvmOpts: '-Dorg.infinispan.openssl=false -Dorg.infinispan.threads.virtual=true -Dinfinispan.cluster.name=ISPN -Djgroups.xsite.fd.interval=2000 -Djgroups.xsite.fd.timeout=10000 -Dotel.traces.sampler=parentbased_always_off' logging: categories: org.infinispan: info diff --git a/docs/guides/high-availability/examples/generated/ispn-site-b.yaml b/docs/guides/high-availability/examples/generated/ispn-site-b.yaml index e730098c1a05..397f5852e25d 100644 --- a/docs/guides/high-availability/examples/generated/ispn-site-b.yaml +++ b/docs/guides/high-availability/examples/generated/ispn-site-b.yaml @@ -36,6 +36,12 @@ data: metrics: namesAsTags: true histograms: false + tracing: + enabled: true + collector-endpoint: "http://tempo-tempo.monitoring.svc:4318" + exporter-protocol: "OTLP" + service-name: "infinispan-server" + security: false server: endpoints: - securityRealm: default @@ -339,12 +345,12 @@ spec: expose: type: Route configMapName: "cluster-config" - image: quay.io/infinispan/server:15.0.11.Final - version: 15.0.4 + image: + version: 15.0.15 configListener: enabled: false container: - extraJvmOpts: '-Dorg.infinispan.openssl=false -Dinfinispan.cluster.name=ISPN -Djgroups.xsite.fd.interval=2000 -Djgroups.xsite.fd.timeout=10000' + 
extraJvmOpts: '-Dorg.infinispan.openssl=false -Dorg.infinispan.threads.virtual=true -Dinfinispan.cluster.name=ISPN -Djgroups.xsite.fd.interval=2000 -Djgroups.xsite.fd.timeout=10000 -Dotel.traces.sampler=parentbased_always_off' cpu: 4:2 memory: 2Gi:1Gi logging: diff --git a/docs/guides/high-availability/examples/generated/ispn-volatile.yaml b/docs/guides/high-availability/examples/generated/ispn-volatile.yaml index d126a78e5875..361dfae06886 100644 --- a/docs/guides/high-availability/examples/generated/ispn-volatile.yaml +++ b/docs/guides/high-availability/examples/generated/ispn-volatile.yaml @@ -36,6 +36,12 @@ data: metrics: namesAsTags: true histograms: false + tracing: + enabled: true + collector-endpoint: "http://tempo-tempo.monitoring.svc:4318" + exporter-protocol: "OTLP" + service-name: "infinispan-server" + security: false server: endpoints: - securityRealm: default @@ -507,12 +513,12 @@ spec: expose: type: Route configMapName: "cluster-config" - image: quay.io/infinispan/server:15.0.11.Final - version: 15.0.4 + image: + version: 15.0.15 configListener: enabled: false container: - extraJvmOpts: '-Dorg.infinispan.openssl=false -Dinfinispan.cluster.name=ISPN -Djgroups.xsite.fd.interval=2000 -Djgroups.xsite.fd.timeout=10000' + extraJvmOpts: '-Dorg.infinispan.openssl=false -Dorg.infinispan.threads.virtual=true -Dinfinispan.cluster.name=ISPN -Djgroups.xsite.fd.interval=2000 -Djgroups.xsite.fd.timeout=10000 -Dotel.traces.sampler=parentbased_always_off' cpu: 4:2 memory: 2Gi:1Gi logging: diff --git a/docs/guides/high-availability/examples/generated/keycloak-ispn.yaml b/docs/guides/high-availability/examples/generated/keycloak-ispn.yaml index 6b8f5bf36bb8..7a6e27ed209e 100644 --- a/docs/guides/high-availability/examples/generated/keycloak-ispn.yaml +++ b/docs/guides/high-availability/examples/generated/keycloak-ispn.yaml @@ -54,7 +54,7 @@ metadata: name: keycloak-providers namespace: keycloak binaryData: - keycloak-benchmark-dataset-0.15-SNAPSHOT.jar: ... + keycloak-benchmark-dataset-999.0.0-SNAPSHOT.jar: ... 
--- # Source: keycloak/templates/postgres/postgres-exporter-configmap.yaml apiVersion: v1 @@ -206,7 +206,7 @@ spec: value: keycloak - name: POSTGRES_DB value: keycloak - image: postgres:15 + image: mirror.gcr.io/postgres:15 volumeMounts: # Using volume mount for PostgreSQL's data folder as it is otherwise not writable - mountPath: /var/lib/postgresql @@ -351,7 +351,7 @@ spec: - name: SQLPAD_CONNECTIONS__pgdemo__username value: keycloak - name: SQLPAD_CONNECTIONS__pgdemo__password - value: pass + value: secret99 - name: SQLPAD_CONNECTIONS__pgdemo__database value: keycloak - name: SQLPAD_CONNECTIONS__pgdemo__driver @@ -362,7 +362,7 @@ spec: value: '86400' - name: SQLPAD_QUERY_RESULT_MAX_ROWS value: '100000' - image: sqlpad/sqlpad:6.11.0 + image: mirror.gcr.io/sqlpad/sqlpad:6.11.0 imagePullPolicy: Always startupProbe: httpGet: @@ -451,14 +451,10 @@ spec: startOptimized: false # <2> features: enabled: - - user-event-metrics - multi-site # <3> - transaction: - xaEnabled: false # <4> # tag::keycloak-ispn[] additionalOptions: # end::keycloak-ispn[] - # end::keycloak[] - name: http-metrics-histograms-enabled value: 'true' @@ -473,7 +469,7 @@ spec: # tag::keycloak[] - name: log-console-output value: json - - name: metrics-enabled # <5> + - name: metrics-enabled # <4> value: 'true' - name: event-metrics-user-enabled value: 'true' @@ -490,10 +486,9 @@ spec: secret: name: remote-store-secret key: password - - name: spi-connections-infinispan-quarkus-site-name # <5> - value: keycloak - # end::keycloak-ispn[] - name: db-driver + # end::keycloak-ispn[] + value: software.amazon.jdbc.Driver http: tlsSecret: keycloak-tls-secret @@ -503,7 +498,7 @@ spec: podTemplate: metadata: annotations: - checksum/config: a6e4c8f98e1b1035942cd1121684f817d533021a392be90b5df784f474146350-9bfd430c6539df907f0421bb34c92fb32194d461565bd342f7f96ff5a5408273--01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + checksum/config: 74d09d74f6547eec9888f07648f5f5be52afa1be1a6231f286cbd368e86e6f19-099f6e0e31165c359aa5534e8dc6e42b603410742f45fefbc62d923ea6cb7e64--01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b spec: containers: - env: @@ -520,10 +515,11 @@ spec: name: keycloak-preconfigured-admin key: password optional: false - - name: JAVA_OPTS_APPEND # <5> - value: "" + - name: JAVA_OPTS_APPEND + value: > + -Djdk.tracePinnedThreads=full + ports: - # end::keycloak[] # readinessProbe: # exec: # command: @@ -534,8 +530,8 @@ spec: # - 'true' volumeMounts: - name: keycloak-providers - mountPath: /opt/keycloak/providers/keycloak-benchmark-dataset-0.15-SNAPSHOT.jar - subPath: keycloak-benchmark-dataset-0.15-SNAPSHOT.jar + mountPath: /opt/keycloak/providers/keycloak-benchmark-dataset-999.0.0-SNAPSHOT.jar + subPath: keycloak-benchmark-dataset-999.0.0-SNAPSHOT.jar readOnly: true volumes: - name: keycloak-providers @@ -552,11 +548,26 @@ spec: selector: matchLabels: app: keycloak + # Use pod target labels "as is" without any renaming + # podTargetLabels: + # - app + # Since at least Keycloak 26.2 and the latest Quarkus 3.19 version, it requires "OpenMetricsText1.0.0" to retrieve exemplars, + # as at least some of the other protocols don't support exemplars. 
+ scrapeProtocols: + - OpenMetricsText1.0.0 podMetricsEndpoints: - port: management scheme: https tlsConfig: insecureSkipVerify: true + relabelings: + - targetLabel: application + # Alternative: hard-coded value + # replacement: "keycloak" + sourceLabels: + - __meta_kubernetes_pod_label_app + regex: (.+) + replacement: ${1} --- # Source: keycloak/templates/postgres/postgres-exporter.yaml apiVersion: monitoring.coreos.com/v1 diff --git a/docs/guides/high-availability/examples/generated/keycloak.yaml b/docs/guides/high-availability/examples/generated/keycloak.yaml index 522716939809..1bed3e371a71 100644 --- a/docs/guides/high-availability/examples/generated/keycloak.yaml +++ b/docs/guides/high-availability/examples/generated/keycloak.yaml @@ -41,7 +41,7 @@ metadata: name: keycloak-providers namespace: keycloak binaryData: - keycloak-benchmark-dataset-0.15-SNAPSHOT.jar: ... + keycloak-benchmark-dataset-999.0.0-SNAPSHOT.jar: ... --- # Source: keycloak/templates/postgres/postgres-exporter-configmap.yaml apiVersion: v1 @@ -193,7 +193,7 @@ spec: value: keycloak - name: POSTGRES_DB value: keycloak - image: postgres:15 + image: mirror.gcr.io/postgres:15 volumeMounts: # Using volume mount for PostgreSQL's data folder as it is otherwise not writable - mountPath: /var/lib/postgresql @@ -338,7 +338,7 @@ spec: - name: SQLPAD_CONNECTIONS__pgdemo__username value: keycloak - name: SQLPAD_CONNECTIONS__pgdemo__password - value: pass + value: secret99 - name: SQLPAD_CONNECTIONS__pgdemo__database value: keycloak - name: SQLPAD_CONNECTIONS__pgdemo__driver @@ -349,7 +349,7 @@ spec: value: '86400' - name: SQLPAD_QUERY_RESULT_MAX_ROWS value: '100000' - image: sqlpad/sqlpad:6.11.0 + image: mirror.gcr.io/sqlpad/sqlpad:6.11.0 imagePullPolicy: Always startupProbe: httpGet: @@ -440,14 +440,10 @@ spec: startOptimized: false # <2> features: enabled: - - user-event-metrics - multi-site # <3> - transaction: - xaEnabled: false # <4> # tag::keycloak-ispn[] additionalOptions: # end::keycloak-ispn[] - # end::keycloak[] - name: http-metrics-histograms-enabled value: 'true' @@ -462,7 +458,7 @@ spec: # tag::keycloak[] - name: log-console-output value: json - - name: metrics-enabled # <5> + - name: metrics-enabled # <4> value: 'true' - name: event-metrics-user-enabled value: 'true' @@ -481,8 +477,6 @@ spec: secret: name: remote-store-secret key: password - - name: spi-connections-infinispan-quarkus-site-name - value: keycloak - name: db-driver value: software.amazon.jdbc.Driver http: @@ -493,7 +487,7 @@ spec: podTemplate: metadata: annotations: - checksum/config: a6e4c8f98e1b1035942cd1121684f817d533021a392be90b5df784f474146350-9af6f9e8393229798cfb789798e36f84e39803616fe3e51b2a38e3ce05830565--01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + checksum/config: 74d09d74f6547eec9888f07648f5f5be52afa1be1a6231f286cbd368e86e6f19-b9788fd7a0f3ed13e27c33f89e0e1019fc1fb7d445005dada32fab73b68c335b--01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b spec: containers: - env: @@ -510,10 +504,11 @@ spec: name: keycloak-preconfigured-admin key: password optional: false - - name: JAVA_OPTS_APPEND # <5> - value: "" + - name: JAVA_OPTS_APPEND + value: > + -Djdk.tracePinnedThreads=full + ports: - # end::keycloak[] # readinessProbe: # exec: # command: @@ -524,8 +519,8 @@ spec: # - 'true' volumeMounts: - name: keycloak-providers - mountPath: /opt/keycloak/providers/keycloak-benchmark-dataset-0.15-SNAPSHOT.jar - subPath: keycloak-benchmark-dataset-0.15-SNAPSHOT.jar + mountPath: 
/opt/keycloak/providers/keycloak-benchmark-dataset-999.0.0-SNAPSHOT.jar + subPath: keycloak-benchmark-dataset-999.0.0-SNAPSHOT.jar readOnly: true volumes: - name: keycloak-providers @@ -542,11 +537,26 @@ spec: selector: matchLabels: app: keycloak + # Use pod target labels "as is" without any renaming + # podTargetLabels: + # - app + # Since at least Keycloak 26.2 and the latest Quarkus 3.19 version, it requires "OpenMetricsText1.0.0" to retrieve exemplars, + # as at least some of the other protocols don't support exemplars. + scrapeProtocols: + - OpenMetricsText1.0.0 podMetricsEndpoints: - port: management scheme: https tlsConfig: insecureSkipVerify: true + relabelings: + - targetLabel: application + # Alternative: hard-coded value + # replacement: "keycloak" + sourceLabels: + - __meta_kubernetes_pod_label_app + regex: (.+) + replacement: ${1} --- # Source: keycloak/templates/postgres/postgres-exporter.yaml apiVersion: monitoring.coreos.com/v1 diff --git a/docs/guides/high-availability/health-checks-multi-site.adoc b/docs/guides/high-availability/health-checks-multi-site.adoc index 01deaaf4d561..09b847999052 100644 --- a/docs/guides/high-availability/health-checks-multi-site.adoc +++ b/docs/guides/high-availability/health-checks-multi-site.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Health checks for multi-site deployments" -summary="Validating the health of a multi-site deployment" > +summary="Validate the health of a multi-site deployment." > When running the <@links.ha id="introduction" /> in a Kubernetes environment, you should automate checks to see if everything is up and running as expected. diff --git a/docs/guides/high-availability/introduction.adoc b/docs/guides/high-availability/introduction.adoc index 537480879a61..963d8e260625 100644 --- a/docs/guides/high-availability/introduction.adoc +++ b/docs/guides/high-availability/introduction.adoc @@ -4,7 +4,7 @@ <@tmpl.guide title="Multi-site deployments" -summary="Connect multiple {project_name} deployments in different sites to increase the overall availability" > +summary="Connect multiple {project_name} deployments in different sites to increase the overall availability." > {project_name} supports deployments that consist of multiple {project_name} instances that connect to each other using its Infinispan caches; load balancers can distribute the load evenly across those instances. Those setups are intended for a transparent network on a single site. @@ -48,10 +48,10 @@ using ROSA HCP. ** Each Openshift cluster has all its workers in a single Availability Zone. ** OpenShift version <@profile.ifProduct> -4.16 (or later). +4.17 (or later). <@profile.ifCommunity> -4.16. +4.17. * Amazon Aurora PostgreSQL database diff --git a/docs/guides/high-availability/operate-site-offline.adoc b/docs/guides/high-availability/operate-site-offline.adoc index 7896548a23b1..36fa9c5724eb 100644 --- a/docs/guides/high-availability/operate-site-offline.adoc +++ b/docs/guides/high-availability/operate-site-offline.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Take site offline" -summary="This describes how to take a site offline so that it no longer processes client requests" > +title="Taking a site offline" +summary="Take a site offline so that it no longer processes client requests." 
> == When to use this procedure diff --git a/docs/guides/high-availability/operate-site-online.adoc b/docs/guides/high-availability/operate-site-online.adoc index 2e8dfe903c63..4f6304dcffc5 100644 --- a/docs/guides/high-availability/operate-site-online.adoc +++ b/docs/guides/high-availability/operate-site-online.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Bring site online" -summary="This guide describes how to bring a site online so that it can process client requests." > +title="Bringing a site online" +summary="Bring a site online so that it can process client requests." > == When to use this procedure diff --git a/docs/guides/high-availability/operate-synchronize.adoc b/docs/guides/high-availability/operate-synchronize.adoc index b1f416f46248..84db04966a6c 100644 --- a/docs/guides/high-availability/operate-synchronize.adoc +++ b/docs/guides/high-availability/operate-synchronize.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Synchronize Sites" -summary="This describes the procedures required to synchronize an offline site with an online site" > +title="Synchronizing sites" +summary="Synchronize an offline site with an online site." > include::partials/infinispan/infinispan-attributes.adoc[] diff --git a/docs/guides/high-availability/partials/aurora/aurora-multiaz-create-procedure.adoc b/docs/guides/high-availability/partials/aurora/aurora-multiaz-create-procedure.adoc index d1a7b112097c..7f47c0d572b3 100644 --- a/docs/guides/high-availability/partials/aurora/aurora-multiaz-create-procedure.adoc +++ b/docs/guides/high-availability/partials/aurora/aurora-multiaz-create-procedure.adoc @@ -306,6 +306,7 @@ The values specified here must be used when configuring the {project_name} datab [source,bash] ---- aws rds create-db-instance \ + --no-auto-minor-version-upgrade \ --db-cluster-identifier keycloak-aurora \ --db-instance-identifier "keycloak-aurora-instance-1" \ --db-instance-class db.t4g.large \ @@ -319,6 +320,7 @@ The values specified here must be used when configuring the {project_name} datab [source,bash] ---- aws rds create-db-instance \ + --no-auto-minor-version-upgrade \ --db-cluster-identifier keycloak-aurora \ --db-instance-identifier "keycloak-aurora-instance-2" \ --db-instance-class db.t4g.large \ diff --git a/docs/guides/high-availability/pinned-guides b/docs/guides/high-availability/pinned-guides index 01a52a59d989..4883a9fb043c 100644 --- a/docs/guides/high-availability/pinned-guides +++ b/docs/guides/high-availability/pinned-guides @@ -1,13 +1,16 @@ introduction concepts-multi-site bblocks-multi-site +concepts-database-connections +concepts-threads +concepts-memory-and-cpu-sizing +concepts-infinispan-cli-batch deploy-aurora-multi-az deploy-infinispan-kubernetes-crossdc deploy-keycloak-kubernetes -deploy-aws-route53-loadbalancer -deploy-aws-route53-failover-lambda -health-checks-multi-site -operate-failover -operate-switch-over -operate-network-partition-recovery -operate-switch-back \ No newline at end of file +deploy-aws-accelerator-loadbalancer +deploy-aws-accelerator-fencing-lambda +operate-site-offline +operate-site-online +operate-synchronize +health-checks-multi-site \ No newline at end of file diff --git a/docs/guides/images/add-client-1.png b/docs/guides/images/add-client-1.png index a981b9db36a4..956043d3bd1b 100644 Binary files a/docs/guides/images/add-client-1.png and b/docs/guides/images/add-client-1.png differ diff --git a/docs/guides/images/add-client-2.png 
b/docs/guides/images/add-client-2.png index 2e1274eaf285..2f7fec8b7641 100644 Binary files a/docs/guides/images/add-client-2.png and b/docs/guides/images/add-client-2.png differ diff --git a/docs/guides/images/add-realm.png b/docs/guides/images/add-realm.png index eb9a194b0a65..19bf11161667 100644 Binary files a/docs/guides/images/add-realm.png and b/docs/guides/images/add-realm.png differ diff --git a/docs/guides/images/add-user.png b/docs/guides/images/add-user.png index 1ca1384a0068..e823c9d774c3 100644 Binary files a/docs/guides/images/add-user.png and b/docs/guides/images/add-user.png differ diff --git a/docs/guides/images/manual-approval-olm.png b/docs/guides/images/manual-approval-olm.png new file mode 100644 index 000000000000..5a60b6614ba5 Binary files /dev/null and b/docs/guides/images/manual-approval-olm.png differ diff --git a/docs/guides/images/observability/exemplar.png b/docs/guides/images/observability/exemplar.png new file mode 100644 index 000000000000..14f35f65f4c2 Binary files /dev/null and b/docs/guides/images/observability/exemplar.png differ diff --git a/docs/guides/images/observability/grafana-share-icon.svg b/docs/guides/images/observability/grafana-share-icon.svg new file mode 100644 index 000000000000..5a96a8a6181d --- /dev/null +++ b/docs/guides/images/observability/grafana-share-icon.svg @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/docs/guides/images/observability/keycloak-capacity-planning-dashboard.png b/docs/guides/images/observability/keycloak-capacity-planning-dashboard.png new file mode 100644 index 000000000000..38d3e1c61310 Binary files /dev/null and b/docs/guides/images/observability/keycloak-capacity-planning-dashboard.png differ diff --git a/docs/guides/images/observability/keycloak-troubleshooting-grafana-dashboard.png b/docs/guides/images/observability/keycloak-troubleshooting-grafana-dashboard.png new file mode 100644 index 000000000000..1341460ea6c2 Binary files /dev/null and b/docs/guides/images/observability/keycloak-troubleshooting-grafana-dashboard.png differ diff --git a/docs/guides/images/token-exchange-switch-refresh.png b/docs/guides/images/token-exchange-switch-refresh.png new file mode 100644 index 000000000000..2cdd0f6c7d6a Binary files /dev/null and b/docs/guides/images/token-exchange-switch-refresh.png differ diff --git a/docs/guides/images/token-exchange-switch.png b/docs/guides/images/token-exchange-switch.png new file mode 100644 index 000000000000..e326489b2e3f Binary files /dev/null and b/docs/guides/images/token-exchange-switch.png differ diff --git a/docs/guides/images/ui-customization/account-console-with-avatar.png b/docs/guides/images/ui-customization/account-console-with-avatar.png new file mode 100644 index 000000000000..9b22807101c1 Binary files /dev/null and b/docs/guides/images/ui-customization/account-console-with-avatar.png differ diff --git a/docs/guides/images/ui-customization/avatar-validation.png b/docs/guides/images/ui-customization/avatar-validation.png new file mode 100644 index 000000000000..f6e1bfffc063 Binary files /dev/null and b/docs/guides/images/ui-customization/avatar-validation.png differ diff --git a/docs/guides/images/ui-customization/color-chooser.png b/docs/guides/images/ui-customization/color-chooser.png new file mode 100644 index 000000000000..da720917a54c Binary files /dev/null and b/docs/guides/images/ui-customization/color-chooser.png differ diff --git a/docs/guides/images/ui-customization/custom-account-console.png 
b/docs/guides/images/ui-customization/custom-account-console.png new file mode 100644 index 000000000000..d7a1b459f1d0 Binary files /dev/null and b/docs/guides/images/ui-customization/custom-account-console.png differ diff --git a/docs/guides/images/ui-customization/device-activity-after.png b/docs/guides/images/ui-customization/device-activity-after.png new file mode 100644 index 000000000000..6f30d91af838 Binary files /dev/null and b/docs/guides/images/ui-customization/device-activity-after.png differ diff --git a/docs/guides/images/ui-customization/device-activity-before.png b/docs/guides/images/ui-customization/device-activity-before.png new file mode 100644 index 000000000000..2f87d87ac467 Binary files /dev/null and b/docs/guides/images/ui-customization/device-activity-before.png differ diff --git a/docs/guides/images/ui-customization/easy-theme-find-color.png b/docs/guides/images/ui-customization/easy-theme-find-color.png new file mode 100644 index 000000000000..2fca430aef30 Binary files /dev/null and b/docs/guides/images/ui-customization/easy-theme-find-color.png differ diff --git a/docs/guides/images/ui-customization/empty-user-federation-page.png b/docs/guides/images/ui-customization/empty-user-federation-page.png new file mode 100644 index 000000000000..5576a174f900 Binary files /dev/null and b/docs/guides/images/ui-customization/empty-user-federation-page.png differ diff --git a/docs/guides/images/ui-customization/keycloak_logo.png b/docs/guides/images/ui-customization/keycloak_logo.png new file mode 100644 index 000000000000..4883f5230235 Binary files /dev/null and b/docs/guides/images/ui-customization/keycloak_logo.png differ diff --git a/docs/guides/images/ui-customization/login-sunrise.png b/docs/guides/images/ui-customization/login-sunrise.png new file mode 100644 index 000000000000..6879b1f438f0 Binary files /dev/null and b/docs/guides/images/ui-customization/login-sunrise.png differ diff --git a/docs/guides/images/ui-customization/myPage.png b/docs/guides/images/ui-customization/myPage.png new file mode 100644 index 000000000000..040ed44758ce Binary files /dev/null and b/docs/guides/images/ui-customization/myPage.png differ diff --git a/docs/guides/images/ui-customization/picture-attribute-general-settings.png b/docs/guides/images/ui-customization/picture-attribute-general-settings.png new file mode 100644 index 000000000000..5e203aab983e Binary files /dev/null and b/docs/guides/images/ui-customization/picture-attribute-general-settings.png differ diff --git a/docs/guides/images/ui-customization/quick-theme-overview.png b/docs/guides/images/ui-customization/quick-theme-overview.png new file mode 100644 index 000000000000..90e67add7462 Binary files /dev/null and b/docs/guides/images/ui-customization/quick-theme-overview.png differ diff --git a/docs/guides/images/ui-customization/readonly-user-storage-provider-with-config.png b/docs/guides/images/ui-customization/readonly-user-storage-provider-with-config.png new file mode 100644 index 000000000000..68e71937da0b Binary files /dev/null and b/docs/guides/images/ui-customization/readonly-user-storage-provider-with-config.png differ diff --git a/docs/guides/images/ui-customization/simple-quick-theme-changes.png b/docs/guides/images/ui-customization/simple-quick-theme-changes.png new file mode 100644 index 000000000000..367df066e0e0 Binary files /dev/null and b/docs/guides/images/ui-customization/simple-quick-theme-changes.png differ diff --git a/docs/guides/images/ui-customization/storage-provider-created.png 
b/docs/guides/images/ui-customization/storage-provider-created.png new file mode 100644 index 000000000000..7e5757ce8cba Binary files /dev/null and b/docs/guides/images/ui-customization/storage-provider-created.png differ diff --git a/docs/guides/images/ui-customization/user-federation-page.png b/docs/guides/images/ui-customization/user-federation-page.png new file mode 100644 index 000000000000..787eb9e08aff Binary files /dev/null and b/docs/guides/images/ui-customization/user-federation-page.png differ diff --git a/docs/guides/observability/configuration-metrics.adoc b/docs/guides/observability/configuration-metrics.adoc index c37dbdd2737e..2699d9acd4eb 100644 --- a/docs/guides/observability/configuration-metrics.adoc +++ b/docs/guides/observability/configuration-metrics.adoc @@ -4,8 +4,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Enabling {project_name} Metrics" -summary="Learn how to enable and expose metrics from the server" +title="Gaining insights with metrics" +summary="Collect metrics to gain insights about state and activities of a running instance of {project_name}." includedOptions="metrics-enabled http-metrics-* cache-metrics-*"> {project_name} has built in support for metrics. This {section} describes how to enable and configure server metrics. diff --git a/docs/guides/observability/event-metrics.adoc b/docs/guides/observability/event-metrics.adoc index f6497a232516..3cd82750a4ea 100644 --- a/docs/guides/observability/event-metrics.adoc +++ b/docs/guides/observability/event-metrics.adoc @@ -4,12 +4,10 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Enabling {project_name} Event Metrics" -summary="Learn how to enable and use {project_name} Event Metrics" -preview="true" +title="Monitoring user activities with event metrics" +summary="Event metrics provide an aggregated view of user activities in a {project_name} instance." includedOptions="metrics-enabled event-metrics-user-*"> -Event metrics can provide admins an overview of the different activities in a {project_name} instance. For now, only metrics for user events are captured. For example, you can monitor the number of logins, login failures, or token refreshes performed. @@ -21,11 +19,11 @@ If you have multiple instances running in a cluster, you will need to collect th == Enable event metrics -To start collecting metrics, enable the feature `user-event-metrics`, enable metrics, and enable the metrics for user events. +To start collecting event metrics, enable metrics and enable the metrics for user events. The following shows the required startup parameters: -<@kc.start parameters="--features=user-event-metrics --metrics-enabled=true --event-metrics-user-enabled=true ..."/> +<@kc.start parameters="--metrics-enabled=true --event-metrics-user-enabled=true ..."/> By default, there is a separate metric for each realm. To break down the metric by client and identity provider, you can add those metrics dimension using the configuration option `event-metrics-user-tags`. 
diff --git a/docs/guides/observability/exemplars.adoc b/docs/guides/observability/exemplars.adoc new file mode 100644 index 000000000000..6e700988e0c4 --- /dev/null +++ b/docs/guides/observability/exemplars.adoc @@ -0,0 +1,101 @@ +<#import "/templates/guide.adoc" as tmpl> +<#import "/templates/links.adoc" as links> + +<@tmpl.guide +title="Analyzing outliers and errors with exemplars" +summary="Use exemplars to connect a metric to a recorded trace to analyze the root cause of errors or latencies."> + +Metrics are aggregations over several events, and show you if your system is operating within defined bounds. +They are great for monitoring error rates or tail latencies, and for setting up alerting or driving performance optimizations. +Still, the aggregation makes it difficult to find root causes for latencies or errors reported in metrics. + +Root causes for errors and latencies can be found by enabling tracing. +To connect a metric to a recorded trace, there is the concept of https://grafana.com/docs/grafana/latest/fundamentals/exemplars/[exemplars]. + +Once exemplars are set up, {project_name} reports metrics with their last recorded trace as an exemplar. +A dashboard tool like Grafana can link the exemplar from a metrics dashboard to a trace view. + +Metrics that support exemplars are: + +* `http_server_requests_seconds_count` (including histograms) + +See the {section} <@links.observability id="metrics-for-troubleshooting-http"/> for details on this metric. + +* `keycloak_credentials_password_hashing_validations_total` + +See the {section} <@links.observability id="metrics-for-troubleshooting-keycloak"/> for details on this metric. + +* `keycloak_user_events_total` + +See the {section} <@links.observability id="metrics-for-troubleshooting-keycloak"/> for details on this metric. + +The screenshot below shows a heatmap visualization of latencies with an exemplar displayed when hovering over one of the pink indicators. + +.Heatmap diagram with exemplar +image::observability/exemplar.png[] + +== Setting up exemplars + +To benefit from exemplars, perform the following steps: + +. Enable metrics for {project_name} as described in {section} <@links.observability id="configuration-metrics" />. + +. Enable tracing for {project_name} as described in {section} <@links.observability id="tracing" />. + +. Enable exemplar storage in your monitoring system. ++ +For Prometheus, this is a https://prometheus.io/docs/prometheus/latest/feature_flags/#exemplars-storage[preview feature that you need to enable]. + +. Scrape the metrics using the `OpenMetricsText1.0.0` protocol, which is not enabled by default in Prometheus. ++ +If you are using `PodMonitors` or similar in a Kubernetes environment, this can be achieved by adding it to the spec of the custom resource: ++ +[source] +---- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + ... +spec: + scrapeProtocols: + - OpenMetricsText1.0.0 +---- + +. Configure in your metrics datasource where to link to for traces. ++ +When using Grafana and Prometheus, this means setting up an `exemplarTraceIdDestinations` entry for the Prometheus datasource, which then points to your tracing datasource provided by tools like Jaeger or Tempo (see the sketch after this procedure). + +. Enable exemplars in your dashboards. ++ +Enable the *Exemplars* toggle in each query on each dashboard where you want to show exemplars. +When set up correctly, you will notice little dots or stars in your dashboards that you can click on to view the traces.
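As a companion to the datasource step above, the following is a minimal sketch of a Grafana datasource provisioning file that links Prometheus exemplars to a tracing datasource; the datasource names, the Prometheus URL, and the `tempo` UID are assumptions and need to match your environment.

[source,yaml]
----
# Sketch of Grafana datasource provisioning; names, URL, and UID are assumed.
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    url: http://prometheus:9090 # assumed Prometheus URL
    jsonData:
      exemplarTraceIdDestinations:
        # Link the trace_id carried by an exemplar to the tracing datasource
        - name: trace_id
          datasourceUid: tempo # assumed UID of your Tempo or Jaeger datasource
----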
+ +[NOTE] +==== +* If you do not specify the scrape protocol, Prometheus will by default not send it in the content negotiation, and Keycloak will then fall back to the PrometheusText protocol which will not contain the exemplars. +* If you enabled tracing and metrics, but the request sampling did not record a trace, the exposed metric will not contain any exemplars. +* If you access the metrics endpoint with your browser, the content negotiation will lead to the format PrometheusText being returned, and you will not see any exemplars. +==== + +== Verifying that exemplars work as expected + +Perform the following steps to verify that {project_name} is set up correctly for exemplars: + +. Follow the instructions to set up metrics and tracing for {project_name}. +. For test purposes, record all traces by setting the tracing ratio to `1.0`. +See <@links.observability id="tracing" anchor="sampling" /> for recommended sampling settings in production systems. +. Log in to the Keycloak instance to create some traces. +. Scrape the metrics with a command similar to the following and search for those metrics that have an exemplar set: ++ +[source] +---- +$ curl -s http://localhost:9000/metrics \ +-H 'Accept: application/openmetrics-text; version=1.0.0; charset=utf-8' \ +| grep "#.*trace_id" +---- ++ +This should result in an output similar to the following. Note the additional `#` after which the span and trace IDs are added: ++ +[source] +---- +http_server_requests_seconds_count {...} ... # {span_id="...",trace_id="..."} ... +---- + + diff --git a/docs/guides/observability/grafana-dashboards.adoc b/docs/guides/observability/grafana-dashboards.adoc new file mode 100644 index 000000000000..dc7de8e812f6 --- /dev/null +++ b/docs/guides/observability/grafana-dashboards.adoc @@ -0,0 +1,102 @@ +<#import "/templates/guide.adoc" as tmpl> +<#import "/templates/links.adoc" as links> + +<@tmpl.guide +title="Visualizing activities in dashboards" +summary="Install the {project_name} Grafana dashboards to visualize the metrics that capture the status and activities of your deployment."> + +{project_name} provides metrics to observe what is happening inside the deployment. +To understand how metrics evolve over time, it is helpful to collect and visualize them in graphs. + +This guide provides instructions on how to visualize collected {project_name} metrics in a running Grafana instance. + +== Prerequisites + +* {project_name} metrics are enabled. Follow the <@links.observability id="configuration-metrics"/> {section} for more details. +* A Grafana instance is running and {project_name} metrics are collected into a Prometheus instance. +* For the HTTP request latency heatmaps to work, enable histograms for HTTP metrics by setting `http-metrics-histograms-enabled` to `true`. + +== {project_name} Grafana dashboards + +Grafana dashboards are distributed in the form of a JSON file that is imported into a Grafana instance. +JSON definitions of {project_name} Grafana dashboards are available in the https://github.com/keycloak/keycloak-grafana-dashboard[keycloak/keycloak-grafana-dashboard GitHub repository]. + +Follow these steps to download the JSON file definitions. + +. Identify the branch from `keycloak-grafana-dashboards` to use from the following table. ++ + +|=== +|{project_name} version |`keycloak-grafana-dashboards` branch + +|>= 26.1 +|`main` +|=== + +. Clone the GitHub repository ++ +---- +git clone -b BRANCH_FROM_STEP_1 https://github.com/keycloak/keycloak-grafana-dashboard.git +---- +. 
The dashboards are available in the directory `keycloak-grafana-dashboard/dashboards`. + +The following sections describe the purpose of each dashboard. + +=== {project_name} troubleshooting dashboard + +This dashboard is available in the JSON file: `keycloak-troubleshooting-dashboard.json`. + +At the top of the dashboard, graphs display the service level indicators as defined in <@links.observability id="keycloak-service-level-indicators"/>. +This dashboard can also be used while troubleshooting a {project_name} deployment following the <@links.observability id="metrics-for-troubleshooting"/> {section}, for example, when SLI graphs do not show expected results. + +.Troubleshooting dashboard +image::observability/keycloak-troubleshooting-grafana-dashboard.png[Troubleshooting dashboard] + +=== {project_name} capacity planning dashboard + +This dashboard is available in the JSON file: `keycloak-capacity-planning-dashboard.json`. + +This dashboard shows metrics that are important when estimating the load handled by a {project_name} deployment. +For example, it shows the number of password validations or login flows performed by {project_name}. +For more detail on these metrics, see the {section} <@links.observability id="metrics-for-troubleshooting-keycloak"/>. + +NOTE: {project_name} event metrics must be enabled for this dashboard to work correctly. To enable them, see the {section} <@links.observability id="event-metrics"/>. + +.Capacity planning dashboard +image::observability/keycloak-capacity-planning-dashboard.png[Capacity planning dashboard] + +== Import a dashboard + +. Open the dashboard page from the left Grafana menu. +. Click *New* and *Import*. +. Click *Upload dashboard JSON file* and select the JSON file of the dashboard you want to import. +. Pick your Prometheus datasource. +. Click *Import*. + +== Export a dashboard + +Exporting a dashboard to JSON format may be useful. For example, you may want to suggest a change in our dashboard repository. + +++++ +
    + +++++ +. Open a dashboard you would like to export. +. Click *share* (image:observability/grafana-share-icon.svg[Grafana share icon]) in the top left corner next to the dashboard name. +. Click the *Export* tab. +. Enable *Export for sharing externally*. +. Click either *Save to file* or *View JSON* and *Copy to Clipboard* according to where you want to store the resulting JSON. +++++ +
    +++++ + +== Further reading + +Continue reading on how to connect traces to dashboard in the <@links.observability id="exemplars" /> {section}. + + diff --git a/docs/guides/observability/health.adoc b/docs/guides/observability/health.adoc index b53b5436ce0a..87dbeb4e6c81 100644 --- a/docs/guides/observability/health.adoc +++ b/docs/guides/observability/health.adoc @@ -4,8 +4,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Enabling {project_name} Health checks" -summary="Learn how to enable and use {project_name} health checks" +title="Tracking instance status with health checks" +summary="Check if an instance has finished its start up and is ready to serve requests by calling its health REST endpoints." includedOptions="health-enabled"> {project_name} has built in support for health checks. This {section} describes how to enable and use the {project_name} health checks. diff --git a/docs/guides/observability/keycloak-service-level-indicators.adoc b/docs/guides/observability/keycloak-service-level-indicators.adoc index 82331bdf61eb..732a593c8849 100644 --- a/docs/guides/observability/keycloak-service-level-indicators.adoc +++ b/docs/guides/observability/keycloak-service-level-indicators.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="{project_name} service level indicators (SLIs)" -summary="Learn about the Service Level Indicators to monitor your {project_name} deployment's performance" +title="Monitoring performance with Service Level Indicators" +summary="Track performance and reliability as perceived by users with Service Level Indicators (SLIs) and Service Level Objectives (SLOs)." > Service Level Indicators (SLIs) and Service Level Objectives (SLOs) are essential components in monitoring and maintaining the performance and reliability of {project_name} in production environments. @@ -58,12 +58,12 @@ At the same time, if you enter a Service Level Agreement (SLA) with stakeholders | Latency | Response time for authentication related HTTP requests as measured by the server -| 95% of all authentication related requests should be faster than 250 ms within a 5-minute-range. +| 95% of all authentication related requests should be faster than 250 ms within 30 days. | {project_name} server-side metrics to track latency for specific endpoints along with Response Time Distribution using `http_server_requests_seconds_bucket` and `http_server_requests_seconds_count`. | Errors | Failed authentication requests due to server problems as measured by the server -| The rate of errors due to server problems for authentication requests should be less than 0.1% within a 5-minute-range. +| The rate of errors due to server problems for authentication requests should be less than 0.1% within 30 days. | Identify server side error by filtering the metric `http_server_requests_seconds_count` on the tag `outcome` for value `SERVER_ERROR`. |=== @@ -103,7 +103,7 @@ NOTE: In Grafana you can replace value `30d:15s` with `$__range:$__interval` to === Latency of authentication requests -This Prometheus query calculates the percentage of authentication requests that completed within 0.25 seconds relative to all authentication requests for specific {project_name} endpoints, targeting a particular namespace and pod, over the past 5 minutes. 
+This Prometheus query calculates the percentage of authentication requests that completed within 0.25 seconds relative to all authentication requests for specific {project_name} endpoints, targeting a particular namespace and pod, over the past 30 days. This example requires the {project_name} configuration `http-metrics-slos` to contain value `250` indicating that buckets for requests faster and slower than 250 ms should be recorded. Setting `http-metrics-histograms-enabled` to `true` would capture additional buckets which can help with performance troubleshooting. @@ -116,7 +116,7 @@ sum( le="0.25", # <2> container="keycloak", # <3> namespace="$namespace"} - [5m] # <4> + [30d] # <4> ) ) without (le,uri,status,outcome,method,pod,instance) # <5> / @@ -126,7 +126,7 @@ sum( uri=~"/realms/{realm}/protocol/{protocol}.*|/realms/{realm}/login-actions.*", # <1> container="keycloak", namespace="$namespace"} - [5m] # <3> + [30d] # <3> ) ) without (le,uri,status,outcome,method,pod,instance) # <5> ---- @@ -136,13 +136,13 @@ sum( <4> Time range as specified by the SLO <5> Ignore as many labels necessary to create a single sum -NOTE: In Grafana you can replace value `5m` with `$__range` to compute latency SLI in the time range selected for the dashboard. +NOTE: In Grafana, you can replace value `30d` with `$__range` to compute latency SLI in the time range selected for the dashboard. === Errors for authentication requests This Prometheus query calculates the percentage of authentication requests that returned a server side error for all authentication requests, -targeting a particular namespace, over the past 5 minutes. +targeting a particular namespace, over the past 30 days. [source,plaintext] ---- @@ -153,7 +153,7 @@ sum( outcome="SERVER_ERROR", # <2> container="keycloak", # <3> namespace="$namespace"} - [5m] # <4> + [30d] # <4> ) ) without (le,uri,status,outcome,method,pod,instance) # <5> / @@ -163,7 +163,7 @@ sum( uri=~"/realms/{realm}/protocol/{protocol}.*|/realms/{realm}/login-actions.*", # <1> container="keycloak", # <3> namespace="$namespace"} - [5m] # <4> + [30d] # <4> ) ) without (le,uri,status,outcome,method,pod,instance) # <5> ---- @@ -173,6 +173,8 @@ sum( <4> Time range as specified by the SLO <5> Ignore as many labels necessary to create a single sum +NOTE: In Grafana, you can replace value `30d` with `$__range` to compute errors SLI in the time range selected for the dashboard. + == Further Reading * https://sre.google/sre-book/service-level-objectives/[Google SRE Book on Service Level Objectives] diff --git a/docs/guides/observability/metrics-for-troubleshooting-clustering-and-network.adoc b/docs/guides/observability/metrics-for-troubleshooting-clustering-and-network.adoc index 516d005c3cfe..9398ed0c1897 100644 --- a/docs/guides/observability/metrics-for-troubleshooting-clustering-and-network.adoc +++ b/docs/guides/observability/metrics-for-troubleshooting-clustering-and-network.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Clustering and network metrics" -summary="Learn about metrics monitoring communication between {project_name} nodes" +title="Clustering metrics" +summary="Use metrics to monitor communication between {project_name} nodes." 
tileVisible="false" > diff --git a/docs/guides/observability/metrics-for-troubleshooting-database.adoc b/docs/guides/observability/metrics-for-troubleshooting-database.adoc index 265a6222dfc4..aad89c176c1f 100644 --- a/docs/guides/observability/metrics-for-troubleshooting-database.adoc +++ b/docs/guides/observability/metrics-for-troubleshooting-database.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="{project_name} Database Metrics" -summary="Learn about metrics describing {project_name}'s connection to the database" +title="Database Metrics" +summary="Use metrics to describe {project_name}'s connection to the database." tileVisible="false" > diff --git a/docs/guides/observability/metrics-for-troubleshooting-embedded-caches-multi-site.adoc b/docs/guides/observability/metrics-for-troubleshooting-embedded-caches-multi-site.adoc index fb2ea08be148..b10077f86154 100644 --- a/docs/guides/observability/metrics-for-troubleshooting-embedded-caches-multi-site.adoc +++ b/docs/guides/observability/metrics-for-troubleshooting-embedded-caches-multi-site.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Embedded Infinispan metrics for multiple sites deployments" -summary="Learn about metrics monitoring caching health" +title="Embedded Infinispan metrics for multi-site deployments" +summary="Use metrics to monitor caching health." tileVisible="false" > diff --git a/docs/guides/observability/metrics-for-troubleshooting-embedded-caches.adoc b/docs/guides/observability/metrics-for-troubleshooting-embedded-caches.adoc index 2f7beba67f61..36824a098bd3 100644 --- a/docs/guides/observability/metrics-for-troubleshooting-embedded-caches.adoc +++ b/docs/guides/observability/metrics-for-troubleshooting-embedded-caches.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Embedded Infinispan metrics for single site deployments" -summary="Learn about metrics monitoring caching health and cluster replication" +summary="Use metrics to monitor caching health and cluster replication." tileVisible="false" > diff --git a/docs/guides/observability/metrics-for-troubleshooting-external-infinispan-multi-site.adoc b/docs/guides/observability/metrics-for-troubleshooting-external-infinispan-multi-site.adoc index 36d1e22260a4..236ca0b59255 100644 --- a/docs/guides/observability/metrics-for-troubleshooting-external-infinispan-multi-site.adoc +++ b/docs/guides/observability/metrics-for-troubleshooting-external-infinispan-multi-site.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="External {jdgserver_name} metrics" -summary="Learn about metrics monitoring external {jdgserver_name} performance" +summary="Use metrics to monitor external {jdgserver_name} performance." tileVisible="false" > diff --git a/docs/guides/observability/metrics-for-troubleshooting-http.adoc b/docs/guides/observability/metrics-for-troubleshooting-http.adoc index a858f531c134..7725ac5652c4 100644 --- a/docs/guides/observability/metrics-for-troubleshooting-http.adoc +++ b/docs/guides/observability/metrics-for-troubleshooting-http.adoc @@ -2,9 +2,10 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="{project_name} HTTP metrics" -summary="Learn about metrics for monitoring the {project_name} HTTP requests processing" +title="HTTP metrics" +summary="Use metrics to monitor the {project_name} HTTP requests processing." 
tileVisible="false" +includedOptions="http-metrics-histograms-enabled http-metrics-slos" > <#include "partials/prerequisites-metrics-troubleshooting.adoc" /> @@ -37,6 +38,8 @@ m| http_server_requests_seconds_sum | The total duration for all the requests processed. |=== +You can enable histograms for this metric by setting `http-metrics-histograms-enabled` to `true`, and add additional buckets for service level objectives using the option `http-metrics-slos`. + include::partials/histogram_note_http.adoc[] === Active requests diff --git a/docs/guides/observability/metrics-for-troubleshooting-jvm.adoc b/docs/guides/observability/metrics-for-troubleshooting-jvm.adoc index 17081022ee6e..a2d23d12fef9 100644 --- a/docs/guides/observability/metrics-for-troubleshooting-jvm.adoc +++ b/docs/guides/observability/metrics-for-troubleshooting-jvm.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="{project_name} JVM metrics" -summary="Learn about key JVM metrics for observing performance of {project_name}" +title="JVM metrics" +summary="Use JVM metrics to observe performance of {project_name}." tileVisible="false" > diff --git a/docs/guides/observability/metrics-for-troubleshooting-keycloak.adoc b/docs/guides/observability/metrics-for-troubleshooting-keycloak.adoc index f34439960d2f..2fd4b7416af3 100644 --- a/docs/guides/observability/metrics-for-troubleshooting-keycloak.adoc +++ b/docs/guides/observability/metrics-for-troubleshooting-keycloak.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="{project_name} self-provided metrics" -summary="Learn about key metrics that {project_name} provides" +title="Self-provided metrics" +summary="Learn about the key metrics that {project_name} provides." tileVisible="false" > @@ -73,7 +73,7 @@ Possible values: -- ==== -To configure what tags are available provide a comma-separated list of tag names to the following option `spi-credential-keycloak-password-validations-counter-tags`. +To configure what tags are available provide a comma-separated list of tag names to the following option `spi-credential--keycloak-password--validations-counter-tags`. By default, all tags are enabled. The snippet below is an example of a response provided by the metric endpoint: diff --git a/docs/guides/observability/metrics-for-troubleshooting.adoc b/docs/guides/observability/metrics-for-troubleshooting.adoc index baa8ea0c496e..571627f2f57b 100644 --- a/docs/guides/observability/metrics-for-troubleshooting.adoc +++ b/docs/guides/observability/metrics-for-troubleshooting.adoc @@ -2,8 +2,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Metrics for troubleshooting {project_name} deployment" -summary="Learn about metrics that can indicate where the issue is, for example, when service level objective is not met" +title="Troubleshooting using metrics" +summary="Use metrics for troubleshooting errors and performance issues." > For a running {project_name} deployment it is important to understand how the system performs and whether it meets your service level objectives (SLOs). @@ -39,6 +39,11 @@ Troubleshooting the database itself is out of scope. You should always confirm the configuration change by conducting a performance test comparing the metrics in question for the old and the new configuration. ==== +[NOTE] +==== +Grafana dashboards for the metrics below can be found in <@links.observability id="grafana-dashboards"/> {section}. 
+==== + == List of {project_name} key metrics * <@links.observability id="metrics-for-troubleshooting-keycloak"/> diff --git a/docs/guides/observability/partials/jgrp_metrics.adoc b/docs/guides/observability/partials/jgrp_metrics.adoc index 8265b33e8be9..e6a8ed69d273 100644 --- a/docs/guides/observability/partials/jgrp_metrics.adoc +++ b/docs/guides/observability/partials/jgrp_metrics.adoc @@ -68,7 +68,7 @@ m| vendor_jgroups_tcp_get_num_bytes_sent m| TCP .3+| The total number of bytes sent by a node. -m| vendor_jgroups_tunnel_get_num_bytes_sent +m| vendor_jgroups_udp_get_num_bytes_sent m| UDP m| vendor_jgroups_tunnel_get_num_bytes_sent diff --git a/docs/guides/observability/pinned-guides b/docs/guides/observability/pinned-guides index d7b1e17822ee..ad86dfbc9916 100644 --- a/docs/guides/observability/pinned-guides +++ b/docs/guides/observability/pinned-guides @@ -3,4 +3,14 @@ configuration-metrics event-metrics keycloak-service-level-indicators metrics-for-troubleshooting +metrics-for-troubleshooting-keycloak +metrics-for-troubleshooting-jvm +metrics-for-troubleshooting-database +metrics-for-troubleshooting-http +metrics-for-troubleshooting-clustering-and-network +metrics-for-troubleshooting-embedded-caches +metrics-for-troubleshooting-embedded-caches-multi-site +metrics-for-troubleshooting-external-infinispan-multi-site tracing +grafana-dashboards +exemplars diff --git a/docs/guides/observability/tracing.adoc b/docs/guides/observability/tracing.adoc index ba9817ad5556..bf2dba3706d0 100644 --- a/docs/guides/observability/tracing.adoc +++ b/docs/guides/observability/tracing.adoc @@ -4,14 +4,14 @@ <#import "/templates/links.adoc" as links> <#import "/templates/profile.adoc" as profile> -<@tmpl.guide title="Enabling Tracing" -summary="Learn how to enable distributed tracing in {project_name}" +<@tmpl.guide title="Root cause analysis with tracing" +summary="Record information during the request lifecycle with OpenTelemetry tracing to identify root causes for latencies and errors in {project_name} and connected systems." includedOptions="tracing-* log-*-include-trace"> This {section} explains how you can enable and configure distributed tracing in {project_name} by utilizing https://opentelemetry.io/[OpenTelemetry] (OTel). Tracing allows for detailed monitoring of each request's lifecycle, which helps quickly identify and diagnose issues, leading to more efficient debugging and maintenance. -It also provides valuable insights into performance bottlenecks and can help optimize the system's overall efficiency. +It provides valuable insights into performance bottlenecks, also across system boundaries, and can help optimize the system's overall efficiency. {project_name} uses a supported https://quarkus.io/guides/opentelemetry-tracing[Quarkus OTel extension] that provides smooth integration and exposure of application traces. == Enable tracing @@ -122,9 +122,10 @@ For instance, to disable trace info in the `console` log, you can turn it off as NOTE: When you explicitly override the log format for the particular log handlers, the `*-include-trace` options do not have any effect, and no tracing is included. +[[sampling]] == Sampling -Sampler decides whether a trace should be discarded or forwarded, effectively reducing overhead by limiting the number of collected traces sent to the collector. +The sampler decides whether a trace should be discarded or forwarded, effectively reducing overhead by limiting the number of collected traces sent to the collector.
It helps manage resource consumption, which leads to avoiding the huge storage costs of tracing every single request and potential performance penalty. WARNING: For a production-ready environment, sampling should be properly set to minimize infrastructure costs. @@ -140,11 +141,13 @@ The default sampler for {project_name} is `traceidratio`, which controls the rat ==== Trace ratio The default trace ratio is `1.0`, which means all traces are sampled - sent to the collector. -The ratio is a floating number in the range `(0,1]`. +The ratio is a floating number in the range `[0,1]`. For instance, when the ratio is `0.1`, only 10% of the traces are sampled. WARNING: For a production-ready environment, the trace ratio should be a smaller number to prevent the massive cost of trace store infrastructure and avoid performance overhead. +TIP: The ratio can be set to `0.0` to disable sampling entirely _at runtime_. + ==== Rationale The sampler makes its own sampling decisions based on the current ratio of sampled spans, regardless of the decision made on the parent span, @@ -176,6 +179,6 @@ You can filter out the required traces in your tracing backend based on their ta {project_name} Operator automatically sets the `KC_TRACING_SERVICE_NAME` and `KC_TRACING_RESOURCE_ATTRIBUTES` environment variables for each {project_name} container included in pods it manages. -NOTE: The `KC_TRACING_RESOURCE_ATTRIBUTES` variable always contains (if not overridden) the `k8s.namespace.name` attribute representing current namespace. +NOTE: The `KC_TRACING_RESOURCE_ATTRIBUTES` variable always contains (if not overridden) the `k8s.namespace.name` attribute representing the current namespace. diff --git a/docs/guides/operator/advanced-configuration.adoc b/docs/guides/operator/advanced-configuration.adoc index a34dbc64ad6d..7c94cc3a36d8 100644 --- a/docs/guides/operator/advanced-configuration.adoc +++ b/docs/guides/operator/advanced-configuration.adoc @@ -6,7 +6,7 @@ <@tmpl.guide title="Advanced configuration" -summary="How to tune advanced aspects of the Keycloak CR"> +summary="Tune advanced aspects of the Keycloak CR."> == Advanced configuration This {section} describes how to use Custom Resources (CRs) for advanced configuration of your {project_name} deployment. @@ -91,11 +91,11 @@ metadata: spec: ... additionalOptions: - - name: spi-connections-http-client-default-connection-pool-size + - name: spi-connections-http-client--default--connection-pool-size secret: # Secret reference name: http-client-secret # name of the Secret key: poolSize # name of the Key in the Secret - - name: spi-email-template-mycustomprovider-enabled + - name: spi-email-template--mycustomprovider--enabled value: true # plain text value ---- NOTE: The name format of options defined in this way is identical to the key format of options specified in the configuration file. @@ -149,6 +149,28 @@ spec: secretName: keycloak-additional-secret ---- +===== Probe Configuration + +The Keycloak CR exposes options to set periodSeconds and failureThreshold on each of the three probes (readiness, liveness and startup) + +[source,yaml] +---- +apiVersion: k8s.keycloak.org/v2alpha1 +kind: Keycloak +metadata: + name: example-kc +spec: + readinessProbe: + periodSeconds: 20 + failureThreshold: 5 + livenessProbe: + periodSeconds: 20 + failureThreshold: 5 + startupProbe: + periodSeconds: 20 + failureThreshold: 5 +---- + === Disabling required options {project_name} and the {project_name} Operator provide the best production-ready experience with security in mind. 
@@ -312,9 +334,9 @@ This includes `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` and the `/v === Admin Bootstrapping -When you create a new instance the Keycloak CR spec.bootstrapAdmin stanza may be used to configure the bootstrap user and/or service account. If you do not specify anything for the spec.bootstrapAdmin, the operator will create a Secret named "metadata.name"-initial-admin with a username temp-admin and a generated password. If you specify a Secret name for bootstrap admin user, then the Secret will need to contain `username` and `password` key value pairs. If you specify a Secret name for bootstrap admin service account, then the Secret will need to contain `client-id` and `client-secret` key value pairs. +When you create a new instance the Keycloak CR spec.bootstrapAdmin stanza may be used to configure the bootstrap user and/or service account. If you do not specify anything for the spec.bootstrapAdmin, the operator will create a Secret named "metadata.name"-initial-admin with a username temp-admin and a generated password. If you specify a Secret name for the bootstrap admin user, then the Secret will need to contain `username` and `password` key value pairs. If you specify a Secret name for bootstrap admin service account, then the Secret will need to contain `client-id` and `client-secret` key value pairs. -If a master realm has already been created for you cluster, then the spec.boostrapAdmin is effectively ignored. If you need to create a recovery admin account, then you'll need to run the CLI command against a Pod directly. +If a master realm has already been created for your cluster, then the spec.boostrapAdmin is effectively ignored. If you need to create a recovery admin account, then you'll need to run the CLI command against a Pod directly. For more information on how to bootstrap a temporary admin user or service account and recover lost admin access, refer to the <@links.server id="bootstrap-admin-recovery"/> guide. @@ -349,14 +371,15 @@ NOTE: The `tracing-jdbc-enabled` is not promoted as a first-class citizen as it For more details about tracing, see <@links.observability id="tracing" />. -=== Network Policies (Preview) +=== Network Policies NetworkPolicies allow you to specify rules for traffic flow within your cluster, and also between Pods and the outside world. -Your cluster must use a network plugin that supports NetworkPolicy enforcement. +Your cluster must use a network plugin that supports NetworkPolicy enforcement to restrict the network traffic. -The operator can automatically create a NetworkPolicy to deny access to the clustering port of your {project_name} Pods. +The operator automatically creates a NetworkPolicy to deny access to the clustering port of your {project_name} Pods. The HTTP(S) endpoint is open to traffic from any namespace and the outside world. -To enable the NetworkPolicy, set `spec.networkPolicy.enabled` in your Keycloak CR, as shown in the example below. + +To disable the NetworkPolicy, set `spec.networkPolicy.enabled` in your Keycloak CR, as shown in the example below. .Keycloak CR with Network Policies enabled [source,yaml] @@ -367,12 +390,12 @@ metadata: name: example-kc spec: networkPolicy: - enabled: true + enabled: false ---- -The above example allows traffic from all sources. +By default, traffic to the HTTP endpoints and the management endpoint is allowed from all sources. The Keycloak CR can be extended to include a list of rules for each of the endpoints exposed by {project_name}. 
-These rules specify from where (the source) the traffic is allowed and it's possible to communicate with the {project_name} Pods. +These rules specify from where (the source) the traffic is allowed, and it is possible to communicate with the {project_name} Pods. .Extended Network Policy configuration [source,yaml] @@ -404,8 +427,8 @@ For a concrete example, let's imagine we have a {project_name} deployment runnin Users have to access {project_name} to login, so {project_name} must be accessible from the Internet. To make this example more interesting, let's assume the {project_name} is monitored too. -The monitoring is enabled as described in the OpenShift documentation page: -https://docs.openshift.com/container-platform/4.12/observability/monitoring/enabling-monitoring-for-user-defined-projects.html[enabling Monitoring for user defined projects]. +The monitoring is enabled as described in this OpenShift documentation page: +https://docs.redhat.com/en/documentation/openshift_container_platform/4.18/html/monitoring/configuring-user-workload-monitoring#enabling-monitoring-for-user-defined-projects_preparing-to-configure-the-monitoring-stack-uwm[enabling Monitoring for user defined projects]. Based on those requirements, the Keycloak CR would be like this (most parts are omitted, like DB connection and security): @@ -439,4 +462,25 @@ They need to access {project_name} to scrape the available metrics. Check the https://kubernetes.io/docs/concepts/services-networking/network-policies/[Kubernetes Network Policies documentation] for more information about NetworkPolicies. +=== Parameterizing service labels and annotations + +If you need to set custom labels or annotations to keycloak service you can do that through `spec.http.labels` and `spec.http.annotations` + +.Custom service labels and annotations +[source,yaml] +---- +apiVersion: k8s.keycloak.org/v2alpha1 +kind: Keycloak +metadata: + name: example-kc +spec: + http: + labels: + label1: label-value1 + label2: label-value2 + annotations: + annotation1: annotation-value1 + annotation2: annotation-value2 +---- + diff --git a/docs/guides/operator/basic-deployment.adoc b/docs/guides/operator/basic-deployment.adoc index e70505da0aa8..9f4802f7c701 100644 --- a/docs/guides/operator/basic-deployment.adoc +++ b/docs/guides/operator/basic-deployment.adoc @@ -6,8 +6,7 @@ <@tmpl.guide title="Basic {project_name} deployment" -priority=20 -summary="How to install {project_name} using the Operator"> +summary="Install {project_name} using the Operator."> == Performing a basic {project_name} deployment This {section} describes how to perform a basic {project_name} Deployment on @@ -100,9 +99,9 @@ When running on OpenShift, with ingress enabled, and with the spec.ingress.class The operator will assign a default hostname to the stored version of the CR similar to what would be created by an OpenShift Route without an explicit host - that is ingress-namespace.appsDomain If the appsDomain changes, or should you need a different hostname for any reason, then update the Keycloak CR. -NOTE: If you set the `hostname-admin`, or the deprecated `hostname-admin-url`, even if you enable ingress, no ingress will be created specifically for admin access. -Admin access via a separate hostname is generally expected to have access restrictions, which are not currently expressible via the Keycloak CR. 
-Also the default ingress does not prevent accessing admin endpoints, so you may not want to enable ingress handling via the Keycloak CR at all when you have a separate hostname for admin endpoints. +NOTE: If you set the `hostname-admin`, or the deprecated `hostname-admin-url`, even if you enable ingress, no ingress will be created specifically for admin access. +Admin access via a separate hostname is generally expected to have access restrictions, which are not currently expressible via the Keycloak CR. +Also the default ingress does not prevent accessing admin endpoints, so you may not want to enable ingress handling via the Keycloak CR at all when you have a separate hostname for admin endpoints. ==== TLS Certificate and key @@ -193,7 +192,9 @@ CONDITION: RollingUpdate === Accessing the {project_name} deployment -The {project_name} deployment is exposed through a basic Ingress and is accessible through the provided hostname. On installations with multiple default IngressClass instances +The {project_name} deployment can be exposed through a basic Ingress accessible through the provided hostname. + +On installations with multiple default IngressClass instances or when running on OpenShift 4.12+ you should provide an ingressClassName by setting `ingress` spec with `className` property to the desired class name: Edit YAML file `example-kc.yaml`: @@ -232,7 +233,12 @@ Apply the changes: ---- kubectl apply -f example-kc.yaml ---- -You can provide an alternative ingress resource pointing to the service `-service`. +You can then provide an alternative ingress resource pointing to the service `-service`. For example, on OpenShift you are not allowed to use wildcard certificates on passthrough Routes with HTTP/2 enabled. A Keycloak CR on OpenShift with TLS enabled using a wildcard certificate with the default IngressClass creates such a Route. In this case, you must disable the built-in ingress with `.spec.ingress.enabled: false`. Access may then be provided by creating a reencrypt Route instead: + +[source,bash] +---- +$ oc create route reencrypt --service=-service --cert= --key= --dest-ca-cert= --ca-cert= --hostname= +---- For debugging and development purposes, consider directly connecting to the {project_name} service using a port forward. For example, enter this command: @@ -290,4 +296,15 @@ kubectl get secret example-kc-initial-admin -o jsonpath='{.data.password}' | bas You can use those credentials to access the Admin Console or the Admin REST API. +=== Security Considerations + +[WARNING] +==== +Anyone with the ability to create or edit a Keycloak CR should be a namespace-level admin. +==== + +Setting the Keycloak CR image requires a high degree of trust, as whatever image is running will have access to any Secrets used for environment variables. + +Similarly, the unsupported `podTemplate` gives the ability to deploy alternative workloads which may be granted the same permissions as the operator itself - which includes the ability to access Secrets in the namespace.
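To illustrate the alternative ingress resource mentioned earlier in this guide, the following is a sketch of an Ingress pointing to the service created by the Operator; the hostname, the service name `example-kc-service`, the port `8443`, and the NGINX `backend-protocol` annotation are assumptions to adapt to your ingress controller and deployment.

[source,yaml]
----
# Sketch only: adapt hostname, service name, port, and annotations to your setup.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-kc-custom-ingress
  annotations:
    # Assumes an NGINX ingress controller forwarding to the TLS-enabled backend
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  rules:
    - host: keycloak.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example-kc-service # <Keycloak CR name>-service
                port:
                  number: 8443
----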
+ diff --git a/docs/guides/operator/customizing-keycloak.adoc b/docs/guides/operator/customizing-keycloak.adoc index 571af2dae212..968299a54e08 100644 --- a/docs/guides/operator/customizing-keycloak.adoc +++ b/docs/guides/operator/customizing-keycloak.adoc @@ -5,7 +5,7 @@ <@tmpl.guide title="Using custom {project_name} images" -summary="How to customize and optimize the {project_name} Container"> +summary="Customize and optimize the {project_name} container."> == {project_name} custom image with the Operator @@ -22,7 +22,7 @@ To avoid this delay, you can provide a custom image with the augmentation built- With a custom image, you can also specify the Keycloak _build-time_ configurations and extensions during the build of the container. -WARNING: When using optimized custom image, `health-enabled` and `metrics-enabled` options need to be explicitly set in the Containerfile. +WARNING: When using the optimized custom image, `health-enabled` and `metrics-enabled` options need to be explicitly set in the Containerfile. For instructions on how to build such an image, see <@links.server id="containers"/>. @@ -52,7 +52,7 @@ Use the Keycloak CR for any configuration that requires Operator awareness, name === Non-optimized custom image -While it is considered a best practice use a pre-augmented image, if you want to use a non-optimized custom image or build time properties with an augmented image that is still possible. You just need set the `startOptimized` field to `false` as shown in this example: +While it is considered a best practice to use a pre-augmented image, if you want to use a non-optimized custom image or build time properties with an augmented image that is still possible. You just need set the `startOptimized` field to `false` as shown in this example: [source,yaml] ---- diff --git a/docs/guides/operator/installation.adoc b/docs/guides/operator/installation.adoc index 38671af6920f..578073824deb 100644 --- a/docs/guides/operator/installation.adoc +++ b/docs/guides/operator/installation.adoc @@ -6,8 +6,7 @@ <@tmpl.guide title="{project_name} Operator Installation" -priority=10 -summary="How to install the {project_name} Operator on Kubernetes and OpenShift"> +summary="Install the {project_name} Operator on Kubernetes and OpenShift."> == Installing the {project_name} Operator This {section} describes how to install the {project_name} Operator in a Kubernetes or OpenShift cluster. @@ -44,6 +43,58 @@ image::configure-operator.png["Configure {project_name} Operator"] You may select to either have the Operator watch the namespace where it is installed, or to watch a single namespace of your choosing. +==== Configuring Manual Approval for OLM Upgrades + +[WARNING] +==== +*Important: Automatic OLM Upgrades* + +By default, OLM automatically updates the {project_name} Operator when a new version is released. 
This can cause several significant issues: + +* When using the default {project_name} image, the Operator uses a matching image of the corresponding {project_name} version, resulting in *unintended {project_name} upgrades* when the Operator is upgraded +* *Even when using custom images*, major Operator upgrades can introduce significant compatibility issues with your existing Keycloak CR configuration, potentially requiring manual intervention +* New fields in Keycloak CR or behavioral changes could impact existing deployments +* No option to downgrade to the previous {project_name} version due to changes related to database migration + +*Recommendation:* + +*We strongly recommend using manual approval mode for the Keycloak Operator.* This ensures you can: + +1. Review release notes and follow migration changes before approving upgrades +2. Schedule maintenance windows for upgrades +3. Test upgrades in a non-production environment first +4. Back up the database to allow downgrading to the previous {project_name} in case of issues +==== + +To prevent automatic upgrades by OLM, set the approval strategy to `Manual` when installing the Operator: + +===== Using the OpenShift web console + +When installing the Operator, select `Manual` approval in the update approval strategy section: + +image::manual-approval-olm.png["Configure manual approval in OLM"] + +===== Using the CLI + +For command-line installation, create a Subscription with `installPlanApproval: Manual`: + +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: keycloak-operator + namespace: +spec: + channel: fast + name: keycloak-operator + source: + sourceNamespace: + installPlanApproval: Manual +---- + +After installation, any upgrade will require manual approval through the OLM interface or via the CLI. + <@profile.ifCommunity> === Installing by using kubectl without Operator Lifecycle Manager @@ -64,7 +115,7 @@ kubectl apply -f https://raw.githubusercontent.com/keycloak/keycloak-k8s-resourc kubectl apply -f https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/{version}/kubernetes/kubernetes.yml ---- -The Operator will watch the namespace where it is installed. You may optionally select a namespace with the `-n` option. +The Operator will watch the namespace where it is installed. You may optionally select a namespace with the `-n` option. === Installing Multiple Operators @@ -77,8 +128,8 @@ If you do this please be aware: - CRD revisions from newer Operator versions won't introduce breaking changes except for the eventual removal of fields that have been well deprecated. Thus newer CRDs are generally backward compatible. - the CRDs installed last will be the ones in use. This applies to OLM installations as well where the Operator version, that is installed as the last, also installs and overrides the CRDs if they exists in the cluster already. - older CRDs may not be forwards compatible with new fields used by newer operators. When using OLM it will check if your custom resources are compatible with the CRDs being installed, so the usage of new fields can prevent the simultaneous installation of older operator versions. -- fields introduced by newer CRDs will not be supported by older Operators. Older Operator will fail to handle CRs that use such new fields with an error deserializing an unrecognized field. +- fields introduced by newer CRDs will not be supported by older Operators. 
Older operators will fail to handle CRs that use such new fields with an error deserializing an unrecognized field. -It is therefore recommended in a multiple Operator install scenario that you keep versions aligned as closely as possible to minimize the potential problems with different versions. +It is therefore recommended in a multiple Operator install scenario that you keep versions aligned as closely as possible to minimize the potential problems with different versions. diff --git a/docs/guides/operator/realm-import.adoc b/docs/guides/operator/realm-import.adoc index 553d14539377..11c28eb52ec8 100644 --- a/docs/guides/operator/realm-import.adoc +++ b/docs/guides/operator/realm-import.adoc @@ -4,9 +4,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="{project_name} Realm Import" -priority=30 -summary="How to perform an automated {project_name} Realm Import using the operator"> +title="Automating a realm import" +summary="Automate a realm import using the operator."> == Importing a {project_name} Realm @@ -17,6 +16,8 @@ Using the {project_name} Operator, you can perform a realm import for the Keyclo * If a Realm with the same name already exists in {project_name}, it will not be overwritten. * The Realm Import CR only supports creation of new realms and does not update or delete those. Changes to the realm performed directly on {project_name} are not synced back in the CR. + +* Once the realm is imported you should delete the Realm Import CR as that will cleanup the associated Kubernetes Job and Pod resources. ==== === Creating a Realm Import Custom Resource @@ -85,10 +86,10 @@ When the import has successfully completed, the output will look like the follow ---- CONDITION: Done STATUS: true - MESSAGE: + MESSAGE: CONDITION: Started STATUS: false - MESSAGE: + MESSAGE: CONDITION: HasErrors STATUS: false MESSAGE: @@ -96,7 +97,7 @@ CONDITION: HasErrors === Placeholders -Imports support placeholders referencing environment variables, see <@links.server id="importExport"/> for more. +Imports support placeholders referencing environment variables, see <@links.server id="importExport"/> for more. The `KeycloakRealmImport` CR allows you to leverage this functionality via the `spec.placeholders` stanza, for example: [source,yaml] @@ -115,7 +116,7 @@ spec: ... ---- -In the above example placeholder replacement will be enabled and an environment variable with key `ENV_KEY` will be created from the Secret `SECRET_NAME`'s value for key `SECRET_KEY`. +In the above example placeholder replacement will be enabled and an environment variable with key `ENV_KEY` will be created from the Secret `SECRET_NAME`'s value for key `SECRET_KEY`. Currently only Secrets are supported and they must be in the same namespace as the Keycloak CR. 
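Because the body of the `spec.placeholders` example is elided in the hunk above, the following is a minimal sketch of how such a CR could look, based only on the surrounding description; the CR name, the `keycloakCRName`, and the realm content are assumptions.

[source,yaml]
----
# Sketch only: an environment variable ENV_KEY sourced from Secret SECRET_NAME, key SECRET_KEY.
apiVersion: k8s.keycloak.org/v2alpha1
kind: KeycloakRealmImport
metadata:
  name: example-realm-import
spec:
  keycloakCRName: example-kc # assumed name of the Keycloak CR
  placeholders:
    ENV_KEY:
      secret:
        name: SECRET_NAME # Secret in the same namespace as the Keycloak CR
        key: SECRET_KEY
  realm:
    realm: example-realm
    enabled: true
----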
- + diff --git a/docs/guides/operator/rolling-updates.adoc b/docs/guides/operator/rolling-updates.adoc new file mode 100644 index 000000000000..74f9d9e2c125 --- /dev/null +++ b/docs/guides/operator/rolling-updates.adoc @@ -0,0 +1,142 @@ +<#import "/templates/guide.adoc" as tmpl> +<#import "/templates/kc.adoc" as kc> +<#import "/templates/options.adoc" as opts> +<#import "/templates/links.adoc" as links> +<#import "/templates/profile.adoc" as profile> + +<@tmpl.guide +title="Avoiding downtime with rolling updates" +summary="Avoid downtime when changing themes, providers, or configurations in optimized images."> + +By default, the {project_name} Operator will perform rolling updates on configuration changes without downtime, and recreate updates with downtime when the image name or tag changes. + +This {section} describes how to minimize downtimes by configuring the {project_name} Operator to perform rolling updates of {project_name} automatically where possible, and how to override automatic detection for rolling updates. + +Use it, for example, to avoid downtimes when rolling out an update to a theme, provider or build time configuration in a custom or optimized image. + +== Supported Update Strategies + +The Operator supports the following update strategies: + +Rolling Updates:: Update the StatefulSet in a rolling fashion, avoiding a downtime when at least two replicas are running. + +Recreate Updates:: Scale down the StatefulSet before applying updates, causing temporary downtime. + +== Configuring the Update Strategy + +Specify the update strategy within the `spec` section of the Keycloak CR YAML definition: + +[source,yaml] +---- +apiVersion: k8s.keycloak.org/v2alpha1 +kind: Keycloak +metadata: + name: example-kc +spec: + update: + strategy: RecreateOnImageChange|Auto|Explicit # <1> + revision: "abc" # <2> +---- +<1> Set the desired update strategy here. + +<2> Revision value for `Explicit` strategy. +Ignored by the other strategies. + +[%autowidth] +.Possible field values +|=== +|Value |Downtime? |Description + +|`RecreateOnImageChange` (default) +|On image name or tag change +|Mimics {project_name} 26.1 or older behavior. +When the image field changes, the Operator scales down the StatefulSet before applying the new image. + +|`Auto` +|On incompatible changes +|The {project_name} Operator detects if a rolling or recreate update is possible. + +In the current version, {project_name} performs a rolling update if the {project_name} version is the same for the old and the new image. +Future versions of {project_name} will change that behavior and use additional information from the configuration, the image and the version to determine if a rolling update is possible to reduce downtimes. + +|`Explicit` +|Only the `revision` field changes +|The {project_name} Operator checks the `spec.update.revision` value. +If it matches the previous deployment, it performs a rolling update. + +|=== + +=== Understanding `Auto` and `Explicit` Update Strategies + +When using the `Auto` update strategy, the {project_name} Operator automatically starts a Job to assess the feasibility of a rolling update. +Read more about the process in the <@links.server id="update-compatibility"/> {section}. +This process consumes cluster resources for the time of the check and introduces a slight delay before the StatefulSet update begins. 
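+
+If you want to observe this check, a generic sketch follows; the namespace and the generated Job name are assumptions that depend on your deployment:
+
+[source,bash]
+----
+# List the Jobs in the namespace of the Keycloak CR; the compatibility check
+# runs as a Job created by the Operator.
+kubectl get jobs -n <namespace>
+
+# Follow the logs of that Job to see the outcome of the check.
+kubectl logs -n <namespace> job/<job-name> --follow
+----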
+ +[WARNING] +==== +If the Keycloak CR configured a `podTemplate` as part of the `unsupported` configuration parameters, the Keycloak Operator will do its best to use those settings for the started Job. Still it might miss some settings due to the flexibility of the `podTemplate` feature and its unsupported nature. + +As a consequence, the Operator might draw the wrong conclusions if a rolling update is possible from changes to the `podTemplate` or information pulled in from Secrets, ConfigMaps or Volumes in the `podTemplate`. + +Therefore, if you are using the unsupported `podTemplate`, you may need to use one of the other update strategies. +==== + +The `Explicit` update strategy delegates the update decision to the user. +The `revision` field acts as a user-controlled trigger. +While the {project_name} Operator does not interpret the `revision` value itself, any change to the Custom Resource (CR) while the `revision` remains unchanged will prompt a rolling update. + +Exercise caution when using this with automatic Operator upgrades. +The Operator Lifecycle Manager (OLM) may upgrade the {project_name} Operator, and if the `Explicit` update strategy is in use, this could lead to unexpected behavior or deployment failures as the Operator would attempt a rolling update when this is actually not supported. +**If you are using the `Explicit` update strategy, thorough testing in a non-production environment is highly recommended before upgrading.** + +=== CR Statuses + +The Keycloak CR status of `RecreateUpdateUsed` indicates the update strategy employed during the last update operation. +The `lastTransitionTime` field indicates when the last update occurred. +Use this information to observe actions and decisions taken by the Operator. + +[%autowidth] +.Condition statuses +|=== +|Status |Description + +m|Unknown +|The initial state. +It means no update has taken place. + +m|False +|The Operator applied the rolling update strategy in the last update. + +m|True +|The Operator applied the recreate update strategy in the last update. +The `message` field explains why this strategy was chosen. + +|=== + +[[operator-rolling-updates-for-patch-releases]] +== Rolling updates for patch releases + +WARNING: This behavior is currently in an experimental mode, and it is not recommended for use in production. + +It is possible to enable automatic rolling updates when upgrading to a newer patch version in the same `+major.minor+` release stream. + +To enable this behavior, enable feature `rolling-updates:v2` as shown in the following example: + +[source,yaml] +---- +apiVersion: k8s.keycloak.org/v2alpha1 +kind: Keycloak +metadata: + name: example-kc +spec: + features: + enabled: + - rolling-updates:v2 + update: + strategy: Auto +---- + +Read more about rolling updates for patch releases in the <@links.server id="update-compatibility" anchor="rolling-updates-for-patch-releases" /> {section}. + + + diff --git a/docs/guides/pom.xml b/docs/guides/pom.xml index dde0fa3350ad..9dbf7330f267 100644 --- a/docs/guides/pom.xml +++ b/docs/guides/pom.xml @@ -14,8 +14,9 @@ ~ See the License for the specific language governing permissions and ~ limitations under the License. 
--> - + keycloak-docs-parent org.keycloak @@ -28,7 +29,8 @@ Keycloak Guides keycloak-guides Keycloak Guides - + jar @@ -133,7 +135,7 @@ left font true - + - true ../images @@ -198,7 +200,8 @@ ${basedir}/target/generated-guides/getting-started - ${project.build.directory}/generated-docs/getting-started + + ${project.build.directory}/generated-docs/getting-started @@ -209,7 +212,8 @@ ${basedir}/target/generated-guides/high-availability - ${project.build.directory}/generated-docs/high-availability + + ${project.build.directory}/generated-docs/high-availability @@ -224,6 +228,19 @@ true
    + + ui-customization-asciidoc-to-html + generate-resources + + process-asciidoc + + + ${basedir}/target/generated-guides/ui-customization + + ${project.build.directory}/generated-docs/ui-customization + true + +
    @@ -246,4 +263,4 @@
    - + \ No newline at end of file diff --git a/docs/guides/securing-apps/client-registration-cli.adoc b/docs/guides/securing-apps/client-registration-cli.adoc index 3913a87e4c6b..92cdd1e164fc 100644 --- a/docs/guides/securing-apps/client-registration-cli.adoc +++ b/docs/guides/securing-apps/client-registration-cli.adoc @@ -2,9 +2,9 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Client registration CLI" +title="Automating client registration with the CLI" priority=110 -summary="Automating Client Registration with the CLI"> +summary="Use the CLI to automate client registration."> The Client Registration CLI is a command-line interface (CLI) tool for application developers to configure new clients in a self-service manner when integrating with {project_name}. It is specifically designed to interact with {project_name} Client Registration REST endpoints. diff --git a/docs/guides/securing-apps/client-registration.adoc b/docs/guides/securing-apps/client-registration.adoc index 647c81f45135..e881c6fe03d7 100644 --- a/docs/guides/securing-apps/client-registration.adoc +++ b/docs/guides/securing-apps/client-registration.adoc @@ -2,9 +2,9 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Client registration service" +title="Using the client registration service" priority=100 -summary="Using the client registration service"> +summary="Use the client registration service."> In order for an application or service to utilize {project_name} it has to register a client in {project_name}. An admin can do this through the admin console (or admin REST endpoints), but clients can also register themselves through the {project_name} client registration service. diff --git a/docs/guides/securing-apps/docker-registry.adoc b/docs/guides/securing-apps/docker-registry.adoc index 79a745c9eaf0..af8d15396ea3 100644 --- a/docs/guides/securing-apps/docker-registry.adoc +++ b/docs/guides/securing-apps/docker-registry.adoc @@ -2,9 +2,9 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Docker registry" +title="Configuring a Docker registry" priority=90 -summary="Configuring a Docker registry to use {project_name}"> +summary="Configure a Docker registry to use {project_name}."> NOTE: Docker authentication is disabled by default. To enable see the https://www.keycloak.org/server/features[Enabling and disabling features] {section}. diff --git a/docs/guides/securing-apps/javascript-adapter.adoc b/docs/guides/securing-apps/javascript-adapter.adoc deleted file mode 100644 index 3ef17e640a8d..000000000000 --- a/docs/guides/securing-apps/javascript-adapter.adoc +++ /dev/null @@ -1,558 +0,0 @@ -<#import "/templates/guide.adoc" as tmpl> -<#import "/templates/links.adoc" as links> - -<@tmpl.guide -title="{project_name} JavaScript adapter" -priority=30 -summary="Client-side JavaScript library that can be used to secure web applications."> - -{project_name} comes with a client-side JavaScript library called `keycloak-js` that can be used to secure web applications. The adapter also comes with built-in support for Cordova applications. -The adapter uses OpenID Connect protocol under the covers. You can take a look at the <@links.securingapps id="oidc-layers" anchor="_oidc_available_endpoints"/> {section} for the more generic information about OpenID Connect endpoints and capabilities. 
- -== Installation - -We recommend that you install the https://www.npmjs.com/package/keycloak-js[`keycloak-js`] package from NPM: - -[source,bash] ----- -npm install keycloak-js ----- - -== {project_name} server configuration - -One important thing to consider about using client-side applications is that the client has to be a public client as there is no secure way to store client credentials in a client-side application. This consideration makes it very important to make sure the redirect URIs you have configured for the client are correct and as specific as possible. - -To use the adapter, create a client for your application in the {project_name} Admin Console. Make the client public by toggling *Client authentication* to *Off* on the *Capability config* page. - -You also need to configure `Valid Redirect URIs` and `Web Origins`. Be as specific as possible as failing to do so may result in a security vulnerability. - -== Using the adapter - -The following example shows how to initialize the adapter. Make sure that you replace the options passed to the `Keycloak` constructor with those of the client you have configured. - -[source,javascript,subs="attributes+"] ----- -import Keycloak from 'keycloak-js'; - -const keycloak = new Keycloak({ - url: "http://keycloak-server", - realm: "my-realm", - clientId: "my-app" -}); - -try { - const authenticated = await keycloak.init(); - if (authenticated) { - console.log('User is authenticated'); - } else { - console.log('User is not authenticated'); - } -} catch (error) { - console.error('Failed to initialize adapter:', error); -} ----- - - - -To authenticate, you call the `login` function. Two options exist to make the adapter automatically authenticate. You can pass `login-required` or `check-sso` to the `init()` function. - -* `login-required` authenticates the client if the user is logged in to {project_name} or displays the login page if the user is not logged in. -* `check-sso` only authenticates the client if the user is already logged in. If the user is not logged in, the browser is redirected back to the application and remains unauthenticated. - -You can configure a _silent_ `check-sso` option. With this feature enabled, your browser will not perform a full redirect to the {project_name} server and back to your application, but this action will be performed in a hidden iframe. Therefore, your application resources are only loaded and parsed once by the browser, namely when the application is initialized and not again after the redirect back from {project_name} to your application. This approach is particularly useful in case of SPAs (Single Page Applications). - -To enable the _silent_ `check-sso`, you provide a `silentCheckSsoRedirectUri` attribute in the init method. Make sure this URI is a valid endpoint in the application; it must be configured as a valid redirect for the client in the {project_name} Admin Console: - -[source,javascript] ----- -await keycloak.init({ - onLoad: 'check-sso', - silentCheckSsoRedirectUri: `${r"${location.origin}"}/silent-check-sso.html` -}); ----- - -The page at the silent check-sso redirect uri is loaded in the iframe after successfully checking your authentication state and retrieving the tokens from the {project_name} server. 
-It has no other task than sending the received tokens to the main application and should only look like this: - -[source,html,subs="attributes+"] ----- - - - - - - ----- - -Remember that this page must be served by your application at the specified location in `silentCheckSsoRedirectUri` and is _not_ part of the adapter. - -WARNING: _Silent_ `check-sso` functionality is limited in some modern browsers. Please see the <<_modern_browsers,Modern Browsers with Tracking Protection Section>>. - -To enable `login-required` set `onLoad` to `login-required` and pass to the init method: - -[source,javascript] ----- -await keycloak.init({ - onLoad: 'login-required' -}); ----- - -After the user is authenticated the application can make requests to RESTful services secured by {project_name} by including the bearer token in the -`Authorization` header. For example: - -[source,javascript,subs="attributes+"] ----- -async function fetchUsers() { - const response = await fetch('/api/users', { - headers: { - accept: 'application/json', - authorization: `Bearer ${r"${keycloak.token}"}` - } - }); - - return response.json(); -} ----- - -One thing to keep in mind is that the access token by default has a short life expiration so you may need to refresh the access token prior to sending the request. You refresh this token by calling the `updateToken()` method. This method returns a Promise, which makes it easy to invoke the service only if the token was successfully refreshed and displays an error to the user if it was not refreshed. For example: - -[source,javascript] ----- -try { - await keycloak.updateToken(30); -} catch (error) { - console.error('Failed to refresh token:', error); -} - -const users = await fetchUsers(); ----- - -[NOTE] -==== -Both access and refresh token are stored in memory and are not persisted in any kind of storage. Therefore, these tokens should never be persisted to prevent hijacking attacks. -==== - -== Session Status iframe - -By default, the adapter creates a hidden iframe that is used to detect if a Single-Sign Out has occurred. This iframe does not require any network traffic. Instead the status is retrieved by looking at a special status cookie. This feature can be disabled by setting `checkLoginIframe: false` in the options passed to the `init()` method. - -You should not rely on looking at this cookie directly. Its format can change and it's also associated with the URL of the {project_name} server, not -your application. - -WARNING: Session Status iframe functionality is limited in some modern browsers. Please see <<_modern_browsers,Modern Browsers with Tracking Protection Section>>. - -[[_javascript_implicit_flow]] -== Implicit and hybrid flow - -By default, the adapter uses the https://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code] flow. - -With this flow, the {project_name} server returns an authorization code, not an authentication token, to the application. The JavaScript adapter exchanges the `code` for an access token and a refresh token after the browser is redirected back to the application. - -{project_name} also supports the https://openid.net/specs/openid-connect-core-1_0.html#ImplicitFlowAuth[Implicit] flow where an access token is sent immediately after successful authentication with {project_name}. This flow may have better performance than the standard flow because no additional request exists to exchange the code for tokens, but it has implications when the access token expires. 
- -However, sending the access token in the URL fragment can be a security vulnerability. For example the token could be leaked through web server logs and or -browser history. - -To enable implicit flow, you enable the *Implicit Flow Enabled* flag for the client in the {project_name} Admin Console. You also pass the parameter `flow` with the value `implicit` to `init` method: - -[source,javascript] ----- -await keycloak.init({ - flow: 'implicit' -}) ----- - -Note that only an access token is provided and no refresh token exists. This situation means that once the access token has expired, the application has to redirect to {project_name} again to obtain a new access token. - -{project_name} also supports the https://openid.net/specs/openid-connect-core-1_0.html#HybridFlowAuth[Hybrid] flow. - -This flow requires the client to have both the *Standard Flow* and *Implicit Flow* enabled in the Admin Console. The {project_name} server then sends both the code and tokens to your application. The access token can be used immediately while the code can be exchanged for access and refresh tokens. Similar to the implicit flow, the hybrid flow is good for performance because the access token is available immediately. -But, the token is still sent in the URL, and the security vulnerability mentioned earlier may still apply. - -One advantage in the Hybrid flow is that the refresh token is made available to the application. - -For the Hybrid flow, you need to pass the parameter `flow` with value `hybrid` to the `init` method: - -[source,javascript] ----- -await keycloak.init({ - flow: 'hybrid' -}); ----- - -[#hybrid-apps-with-cordova] -== Hybrid Apps with Cordova - -{project_name} supports hybrid mobile apps developed with https://cordova.apache.org/[Apache Cordova]. The adapter has two modes for this: `cordova` and `cordova-native`: - -The default is `cordova`, which the adapter automatically selects if no adapter type has been explicitly configured and `window.cordova` is present. When logging in, it opens an https://cordova.apache.org/docs/en/latest/reference/cordova-plugin-inappbrowser/[InApp Browser] that lets the user interact with {project_name} and afterwards returns to the app by redirecting to `http://localhost`. Because of this behavior, you whitelist this URL as a valid redirect-uri in the client configuration section of the Admin Console. - -While this mode is easy to set up, it also has some disadvantages: - -* The InApp-Browser is a browser embedded in the app and is not the phone's default browser. Therefore it will have different settings and stored credentials will not be available. -* The InApp-Browser might also be slower, especially when rendering more complex themes. -* There are security concerns to consider, before using this mode, such as that it is possible for the app to gain access to the credentials of the user, as it has full control of the browser rendering the login page, so do not allow its use in apps you do not trust. - -The alternative mode is`cordova-native`, which takes a different approach. It opens the login page using the system's browser. After the user has authenticated, the browser redirects back into the application using a special URL. From there, the {project_name} adapter can finish the login by reading the code or token from the URL. 
- -You can activate the native mode by passing the adapter type `cordova-native` to the `init()` method: - -[source,javascript] ----- -await keycloak.init({ - adapter: 'cordova-native' -}); ----- - -This adapter requires two additional plugins: - -* https://github.com/google/cordova-plugin-browsertab[cordova-plugin-browsertab]: allows the app to open webpages in the system's browser -* https://github.com/e-imaxina/cordova-plugin-deeplinks[cordova-plugin-deeplinks]: allow the browser to redirect back to your app by special URLs - -The technical details for linking to an app differ on each platform and special setup is needed. -Please refer to the Android and iOS sections of the https://github.com/e-imaxina/cordova-plugin-deeplinks/blob/master/README.md[deeplinks plugin documentation] for further instructions. - -Different kinds of links exist for opening apps: - -* custom schemes, such as `myapp://login` or `android-app://com.example.myapp/https/example.com/login`. -* https://developer.apple.com/documentation/xcode/allowing-apps-and-websites-to-link-to-your-content/[Universal Links (iOS)] / https://developer.android.com/training/app-links/deep-linking[Deep Links (Android)]. - -While the former are easier to set up and tend to work more reliably, the latter offer extra security because they are unique and only the owner of a domain can register them. Custom-URLs are deprecated on iOS. For best reliability, we recommend that you use universal links combined with a fallback site that uses a custom-url link. - -Furthermore, we recommend the following steps to improve compatibility with the adapter: - -* Universal Links on iOS seem to work more reliably with `response-mode` set to `query` -* To prevent Android from opening a new instance of your app on redirect add the following snippet to `config.xml`: - -[source,xml] ----- - ----- - -[#custom-adapters] -== Custom Adapters - -In some situations, you may need to run the adapter in environments that are not supported by default, such as Capacitor. To use the JavasScript client in these environments, you can pass a custom adapter. For example, a third-party library could provide such an adapter to make it possible to reliably run the adapter: - -[source,javascript] ----- -import Keycloak from 'keycloak-js'; -import KeycloakCapacitorAdapter from 'keycloak-capacitor-adapter'; - -const keycloak = new Keycloak({ - url: "http://keycloak-server", - realm: "my-realm", - clientId: "my-app" -}); - -await keycloak.init({ - adapter: KeycloakCapacitorAdapter, -}); ----- - -This specific package does not exist, but it gives a pretty good example of how such an adapter could be passed into the client. - -It's also possible to make your own adapter, to do so you will have to implement the methods described in the `KeycloakAdapter` interface. For example the following TypeScript code ensures that all the methods are properly implemented: - -[source,typescript] ----- -import Keycloak, { KeycloakAdapter } from 'keycloak-js'; - -// Implement the 'KeycloakAdapter' interface so that all required methods are guaranteed to be present. -const MyCustomAdapter: KeycloakAdapter = { - async login(options) { - // Write your own implementation here. - } - - // The other methods go here... 
-}; - -const keycloak = new Keycloak({ - url: "http://keycloak-server", - realm: "my-realm", - clientId: "my-app" -}); - -await keycloak.init({ - adapter: MyCustomAdapter, -}); ----- - -Naturally you can also do this without TypeScript by omitting the type information, but ensuring implementing the interface properly will then be left entirely up to you. - -[[_modern_browsers]] -== Modern Browsers with Tracking Protection -In the latest versions of some browsers, various cookies policies are applied to prevent tracking of the users by third parties, such as SameSite in Chrome or completely blocked third-party cookies. Those policies are likely to become more restrictive and adopted by other browsers over time. Eventually cookies in third-party contexts may become completely unsupported and blocked by the browsers. As a result, the affected adapter features might ultimately be deprecated. - -The adapter relies on third-party cookies for Session Status iframe, _silent_ `check-sso` and partially also for regular (non-silent) `check-sso`. Those features have limited functionality or are completely disabled based on how restrictive the browser is regarding cookies. The adapter tries to detect this setting and reacts accordingly. - -=== Browsers with "SameSite=Lax by Default" Policy -All features are supported if SSL / TLS connection is configured on the {project_name} side as well as on the application side. For example, Chrome is affected starting with version 84. - -=== Browsers with Blocked Third-Party Cookies -Session Status iframe is not supported and is automatically disabled if such browser behavior is detected by the adapter. This means the adapter cannot use a session cookie for Single Sign-Out detection and must rely purely on tokens. As a result, when a user logs out in another window, the application using the adapter will not be logged out until the application tries to refresh the Access Token. Therefore, consider setting the Access Token Lifespan to a relatively short time, so that the logout is detected as soon as possible. For more details, see link:{adminguide_link}#_timeouts[Session and Token Timeouts]. - -_Silent_ `check-sso` is not supported and falls back to regular (non-silent) `check-sso` by default. This behavior can be changed by setting `silentCheckSsoFallback: false` in the options passed to the `init` method. In this case, `check-sso` will be completely disabled if restrictive browser behavior is detected. - -Regular `check-sso` is affected as well. Since Session Status iframe is unsupported, an additional redirect to {project_name} has to be made when the adapter is initialized to check the user's login status. This check is different from the standard behavior when the iframe is used to tell whether the user is logged in, and the redirect is performed only when the user is logged out. - -An affected browser is for example Safari starting with version 13.1. - -== API Reference - -=== Constructor - -[source,javascript,subs="attributes+"] ----- -// Recommended way to initialize the adapter. -new Keycloak({ - url: "http://keycloak-server", - realm: "my-realm", - clientId: "my-app" -}); - -// Alternatively a string to the path of the `keycloak.json` file. -// Has some performance implications, as it will load the keycloak.json file from the server. -// This version might also change in the future and is therefore not recommended. 
-new Keycloak("http://keycloak-server/keycloak.json"); ----- - -=== Properties - -authenticated:: - Is `true` if the user is authenticated, `false` otherwise. - -token:: - The base64 encoded token that can be sent in the `Authorization` header in requests to services. - -tokenParsed:: - The parsed token as a JavaScript object. - -subject:: - The user id. - -idToken:: - The base64 encoded ID token. - -idTokenParsed:: - The parsed id token as a JavaScript object. - -realmAccess:: - The realm roles associated with the token. - -resourceAccess:: - The resource roles associated with the token. - -refreshToken:: - The base64 encoded refresh token that can be used to retrieve a new token. - -refreshTokenParsed:: - The parsed refresh token as a JavaScript object. - -timeSkew:: - The estimated time difference between the browser time and the {project_name} server in seconds. This value is just an estimation, but is accurate - enough when determining if a token is expired or not. - -responseMode:: - Response mode passed in init (default value is fragment). - -flow:: - Flow passed in init. - -adapter:: - Allows you to override the way that redirects and other browser-related functions will be handled by the library. - Available options: - * "default" - the library uses the browser api for redirects (this is the default) - * "cordova" - the library will try to use the InAppBrowser cordova plugin to load keycloak login/registration pages (this is used automatically when the library is working in a cordova ecosystem) - * "cordova-native" - the library tries to open the login and registration page using the phone's system browser using the BrowserTabs cordova plugin. This requires extra setup for redirecting back to the app (see <>). - * "custom" - allows you to implement a custom adapter (only for advanced use cases) - -responseType:: - Response type sent to {project_name} with login requests. This is determined based on the flow value used during initialization, but can be overridden by setting this value. - -=== Methods - -*init(options)* - -Called to initialize the adapter. - -Options is an Object, where: - -* useNonce - Adds a cryptographic nonce to verify that the authentication response matches the request (default is `true`). -* onLoad - Specifies an action to do on load. Supported values are `login-required` or `check-sso`. -* silentCheckSsoRedirectUri - Set the redirect uri for silent authentication check if onLoad is set to 'check-sso'. -* silentCheckSsoFallback - Enables fall back to regular `check-sso` when _silent_ `check-sso` is not supported by the browser (default is `true`). -* token - Set an initial value for the token. -* refreshToken - Set an initial value for the refresh token. -* idToken - Set an initial value for the id token (only together with token or refreshToken). -* scope - Set the default scope parameter to the {project_name} login endpoint. Use a space-delimited list of scopes. Those typically -reference link:{adminguide_link}#_client_scopes[Client scopes] defined on a particular client. Note that the scope `openid` will -always be added to the list of scopes by the adapter. For example, if you enter the scope options `address phone`, then the request -to {project_name} will contain the scope parameter `scope=openid address phone`. Note that the default scope specified here is overwritten if the `login()` options specify scope explicitly. 
-* timeSkew - Set an initial value for skew between local time and {project_name} server in seconds (only together with token or refreshToken). -* checkLoginIframe - Set to enable/disable monitoring login state (default is `true`). -* checkLoginIframeInterval - Set the interval to check login state (default is 5 seconds). -* responseMode - Set the OpenID Connect response mode send to {project_name} server at login request. Valid values are `query` or `fragment`. Default value is `fragment`, which means that after successful authentication will {project_name} redirect to JavaScript application with OpenID Connect parameters added in URL fragment. This is generally safer and recommended over `query`. -* flow - Set the OpenID Connect flow. Valid values are `standard`, `implicit` or `hybrid`. -* enableLogging - Enables logging messages from Keycloak to the console (default is `false`). -* pkceMethod - The method for Proof Key Code Exchange (https://datatracker.ietf.org/doc/html/rfc7636[PKCE]) to use. Configuring this value enables the PKCE mechanism. Available options: - - "S256" - The SHA256 based PKCE method (default) - - false - PKCE is disabled. -* acrValues - Generates the `acr_values` parameter which refers to authentication context class reference and allows clients to declare the required assurance level requirements, e.g. authentication mechanisms. See https://openid.net/specs/openid-connect-modrna-authentication-1_0.html#acr_values[Section 4. acr_values request values and level of assurance in OpenID Connect MODRNA Authentication Profile 1.0]. -* messageReceiveTimeout - Set a timeout in milliseconds for waiting for message responses from the Keycloak server. This is used, for example, when waiting for a message during 3rd party cookies check. The default value is 10000. -* locale - When onLoad is 'login-required', sets the 'ui_locales' query param in compliance with https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest[section 3.1.2.1 of the OIDC 1.0 specification]. - -Returns a promise that resolves when initialization completes. - -*login(options)* - -Redirects to login form, returns a Promise. - -Options is an optional Object, where: - -* redirectUri - Specifies the uri to redirect to after login. -* prompt - This parameter allows to slightly customize the login flow on the {project_name} server side. -For example, enforce displaying the login screen in case of value `login`. Or enforce displaying of consent screen for the value `consent` in case that client has `Consent Required`. -Finally it is possible use the value `none` to make sure that login screen is not displayed to the user, which is useful just to check SSO for the case when user was already -authenticated before (This is related to the `onLoad` check with value `check-sso` described above). -* maxAge - Used just if user is already authenticated. Specifies maximum time since the authentication of user happened. If user is already authenticated for longer time than `maxAge`, the SSO is ignored and he will need to re-authenticate again. -* loginHint - Used to pre-fill the username/email field on the login form. -* scope - Override the scope configured in `init` with a different value for this specific login. -* idpHint - Used to tell {project_name} to skip showing the login page and automatically redirect to the specified identity -provider instead. More info in the link:{adminguide_link}#_client_suggested_idp[Identity Provider documentation]. 
-* acr - Contains the information about `acr` claim, which will be sent inside `claims` parameter to the {project_name} server. Typical usage -is for step-up authentication. Example of use `{ values: ["silver", "gold"], essential: true }`. See OpenID Connect specification -and link:{adminguide_link}#_step-up-flow[Step-up authentication documentation] for more details. -* acrValues - Generates the `acr_values` parameter which refers to authentication context class reference and allows clients to declare the required assurance level requirements, e.g. authentication mechanisms. See https://openid.net/specs/openid-connect-modrna-authentication-1_0.html#acr_values[Section 4. acr_values request values and level of assurance in OpenID Connect MODRNA Authentication Profile 1.0]. -* action - If the value is `register`, the user is redirected to the registration page. See link:{adminguide_link}#_registration-rc-client-flows[Registration requested by client section] for more details. -If the value is `UPDATE_PASSWORD` or another supported required action, the user will be redirected to the reset password page or the other required action page. However, if the user is not authenticated, the user will be sent to the login page and redirected after authentication. -See link:{adminguide_link}#con-aia_server_administration_guide[Application Initiated Action section] for more details. -* locale - Sets the 'ui_locales' query param in compliance with https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest[section 3.1.2.1 of the OIDC 1.0 specification]. -* cordovaOptions - Specifies the arguments that are passed to the Cordova in-app-browser (if applicable). Options `hidden` and `location` are not affected by these arguments. All available options are defined at https://cordova.apache.org/docs/en/latest/reference/cordova-plugin-inappbrowser/. Example of use: `{ zoom: "no", hardwareback: "yes" }`; - -*createLoginUrl(options)* - -Returns a Promise containing the URL to login form. - -Options is an optional Object, which supports same options as the function `login` . - -*logout(options)* - -Redirects to logout. - -Options is an Object, where: - -* redirectUri - Specifies the uri to redirect to after logout. - -*createLogoutUrl(options)* - -Returns the URL to log out the user. - -Options is an Object, where: - -* redirectUri - Specifies the uri to redirect to after logout. - -*register(options)* - -Redirects to registration form. Shortcut for login with option action = 'register' - -Options are same as for the login method but 'action' is set to 'register' - -*createRegisterUrl(options)* - -Returns a Promise containing the url to registration page. Shortcut for createLoginUrl with option action = 'register' - -Options are same as for the createLoginUrl method but 'action' is set to 'register' - -*accountManagement()* - -Redirects to the Account Console. - -*createAccountUrl(options)* - -Returns the URL to the Account Console. - -Options is an Object, where: - -* redirectUri - Specifies the uri to redirect to when redirecting back to the application. - -*hasRealmRole(role)* - -Returns true if the token has the given realm role. - -*hasResourceRole(role, resource)* - -Returns true if the token has the given role for the resource (resource is optional, if not specified clientId is used). - -*loadUserProfile()* - -Loads the users profile. - -Returns a promise that resolves with the profile. 
- -For example: - -[source,javascript] ----- -try { - const profile = await keycloak.loadUserProfile(); - console.log('Retrieved user profile:', profile); -} catch (error) { - console.error('Failed to load user profile:', error); -} ----- - -*isTokenExpired(minValidity)* - -Returns true if the token has less than minValidity seconds left before it expires (minValidity is optional, if not specified 0 is used). - -*updateToken(minValidity)* - -If the token expires within minValidity seconds (minValidity is optional, if not specified 5 is used) the token is refreshed. -If -1 is passed as the minValidity, the token will be forcibly refreshed. -If the session status iframe is enabled, the session status is also checked. - -Returns a promise that resolves with a boolean indicating whether or not the token has been refreshed. - -For example: - -[source,javascript] ----- -try { - const refreshed = await keycloak.updateToken(5); - console.log(refreshed ? 'Token was refreshed' : 'Token is still valid'); -} catch (error) { - console.error('Failed to refresh the token:', error); -} ----- - -*clearToken()* - -Clear authentication state, including tokens. -This can be useful if application has detected the session was expired, for example if updating token fails. - -Invoking this results in onAuthLogout callback listener being invoked. - -=== Callback Events - -The adapter supports setting callback listeners for certain events. Keep in mind that these have to be set before the call to the `init()` method. - -For example: -[source,javascript] ----- -keycloak.onAuthSuccess = () => console.log('Authenticated!'); ----- - -The available events are: - -* *onReady(authenticated)* - Called when the adapter is initialized. -* *onAuthSuccess* - Called when a user is successfully authenticated. -* *onAuthError* - Called if there was an error during authentication. -* *onAuthRefreshSuccess* - Called when the token is refreshed. -* *onAuthRefreshError* - Called if there was an error while trying to refresh the token. -* *onAuthLogout* - Called if the user is logged out (will only be called if the session status iframe is enabled, or in Cordova mode). -* *onTokenExpired* - Called when the access token is expired. If a refresh token is available the token can be refreshed with updateToken, or in cases where it is not (that is, with implicit flow) you can redirect to the login screen to obtain a new access token. - - diff --git a/docs/guides/securing-apps/mod-auth-mellon.adoc b/docs/guides/securing-apps/mod-auth-mellon.adoc index a36d8b4a2fd9..7338003802d5 100644 --- a/docs/guides/securing-apps/mod-auth-mellon.adoc +++ b/docs/guides/securing-apps/mod-auth-mellon.adoc @@ -2,9 +2,9 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="mod_auth_mellon Apache Module" +title="Configuring the mod_auth_mellon Apache Module" priority=80 -summary="Configuring the mod_auth_mellon Apache module with {project_name}"> +summary="Configure the mod_auth_mellon Apache module with {project_name}."> The https://github.com/latchset/mod_auth_mellon[mod_auth_mellon] is an authentication module for Apache. If your language/environment supports using Apache HTTPD as a proxy, then you can use mod_auth_mellon to secure your web application with SAML. For more details on this module see the _mod_auth_mellon_ GitHub repo. 
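+
+As an illustration only, a minimal Apache configuration protecting a single location could look like the following sketch; the protected path and all file paths are assumptions, and the IdP metadata is typically exported from the realm's SAML descriptor:
+
+[source,apache]
+----
+<Location /protected>
+    AuthType Mellon
+    MellonEnable auth
+    Require valid-user
+
+    # Service provider key material and metadata (example paths).
+    MellonSPPrivateKeyFile /etc/httpd/mellon/sp-key.pem
+    MellonSPCertFile       /etc/httpd/mellon/sp-cert.pem
+    MellonSPMetadataFile   /etc/httpd/mellon/sp-metadata.xml
+
+    # Identity provider metadata exported from the Keycloak realm.
+    MellonIdPMetadataFile  /etc/httpd/mellon/idp-metadata.xml
+
+    # Path under which mod_auth_mellon registers its own endpoints (example).
+    MellonEndpointPath /protected/mellon
+</Location>
+----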
diff --git a/docs/guides/securing-apps/mod-auth-openidc.adoc b/docs/guides/securing-apps/mod-auth-openidc.adoc index 8ceb2b660ebe..c1e30cf2b121 100644 --- a/docs/guides/securing-apps/mod-auth-openidc.adoc +++ b/docs/guides/securing-apps/mod-auth-openidc.adoc @@ -2,9 +2,9 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="mod_auth_openidc Apache HTTPD Module" +title="Configuring the mod_auth_openidc Apache HTTPD Module" priority=50 -summary="Configuring the mod_auth_openidc Apache module with {project_name}"> +summary="Configure the mod_auth_openidc Apache module with {project_name}."> WARNING: {project_name} does not provide any official support to mod_auth_openidc. The instructions below are best-effort and may not be up-to-date. diff --git a/docs/guides/securing-apps/nodejs-adapter.adoc b/docs/guides/securing-apps/nodejs-adapter.adoc deleted file mode 100644 index 0d40b5a1a78e..000000000000 --- a/docs/guides/securing-apps/nodejs-adapter.adoc +++ /dev/null @@ -1,381 +0,0 @@ -<#import "/templates/guide.adoc" as tmpl> -<#import "/templates/links.adoc" as links> - -<@tmpl.guide -title="{project_name} Node.js adapter" -priority=40 -summary="Node.js adapter to protect server-side JavaScript apps"> - -{project_name} provides a Node.js adapter built on top of https://github.com/senchalabs/connect[Connect] to protect server-side JavaScript apps - the goal was to be flexible enough to integrate with frameworks like https://expressjs.com/[Express.js]. -The adapter uses OpenID Connect protocol under the covers. You can take a look at the <@links.securingapps id="oidc-layers" anchor="_oidc_available_endpoints"/> {section} for the more generic information about OpenID Connect endpoints and capabilities. - -ifeval::[{project_community}==true] -The library can be downloaded directly from https://www.npmjs.com/package/keycloak-connect[ {project_name} organization] and the source is available at -https://github.com/keycloak/keycloak-nodejs-connect[GitHub]. -endif::[] - -To use the Node.js adapter, first you must create a client for your application in the {project_name} Admin Console. The adapter supports public, confidential, and bearer-only access type. Which one to choose depends on the use-case scenario. - -Once the client is created, click *Action* at the top right and choose *Download adapter config*. For *Format, choose *Keycloak OIDC JSON* and click *Download*. The downloaded `keycloak.json` file is at the root folder of your project. - -== Installation - -Assuming you have already installed https://nodejs.org[Node.js], create a folder for your application: - - mkdir myapp && cd myapp - -Use `npm init` command to create a `package.json` for your application. Now add the {project_name} connect adapter in the dependencies list: - -ifeval::[{project_community}==true] - -[source,json,subs="attributes"] ----- - "dependencies": { - "keycloak-connect": "{project_versionNpm}" - } ----- - -endif::[] - -ifeval::[{project_product}==true] - -[source,json,subs="attributes"] ----- - "dependencies": { - "keycloak-connect": "file:keycloak-connect-{project_versionNpm}.tgz" - } ----- - -endif::[] - -== Usage -Instantiate a Keycloak class:: - -The `Keycloak` class provides a central point for configuration -and integration with your application. The simplest creation -involves no arguments. 
- -In the root directory of your project create a file called `server.js` and add the following code: - -[source,javascript] ----- - const session = require('express-session'); - const Keycloak = require('keycloak-connect'); - - const memoryStore = new session.MemoryStore(); - const keycloak = new Keycloak({ store: memoryStore }); ----- - -Install the `express-session` dependency: - ----- - npm install express-session ----- - -To start the `server.js` script, add the following command in the 'scripts' section of the `package.json`: - ----- - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1", - "start": "node server.js" - }, ----- - -Now we have the ability to run our server with following command: - ----- - npm run start ----- - -By default, this will locate a file named `keycloak.json` alongside -the main executable of your application, in our case on the root folder, to initialize {project_name} specific -settings such as public key, realm name, various URLs. - -In that case a {project_name} deployment is necessary to access {project_name} admin console. - -Please visit links on how to deploy a {project_name} admin console with -https://www.keycloak.org/getting-started/getting-started-podman[Podman] or https://www.keycloak.org/getting-started/getting-started-docker[Docker] - -Now we are ready to obtain the `keycloak.json` file by visiting the {project_name} Admin Console -> clients (left sidebar) -> choose your client -> Installation -> Format Option -> Keycloak OIDC JSON -> Download - -Paste the downloaded file on the root folder of our project. - -Instantiation with this method results in all the reasonable defaults -being used. As alternative, it's also possible to provide a configuration -object, rather than the `keycloak.json` file: - -[source,javascript,subs="attributes+"] ----- - const kcConfig = { - clientId: 'myclient', - bearerOnly: true, - serverUrl: 'http://localhost:8080{kc_base_path}', - realm: 'myrealm', - realmPublicKey: 'MIIBIjANB...' - }; - - const keycloak = new Keycloak({ store: memoryStore }, kcConfig); ----- - -Applications can also redirect users to their preferred identity provider by using: -[source,javascript] ----- - const keycloak = new Keycloak({ store: memoryStore, idpHint: myIdP }, kcConfig); ----- - -Configuring a web session store:: - -If you want to use web sessions to manage -server-side state for authentication, you need to initialize the -`Keycloak(...)` with at least a `store` parameter, passing in the actual -session store that `express-session` is using. 
-[source,javascript] ----- - const session = require('express-session'); - const memoryStore = new session.MemoryStore(); - - // Configure session - app.use( - session({ - secret: 'mySecret', - resave: false, - saveUninitialized: true, - store: memoryStore, - }) - ); - - const keycloak = new Keycloak({ store: memoryStore }); ----- -Passing a custom scope value:: - -By default, the scope value `openid` is passed as a query parameter to {project_name}'s login URL, but you can add an additional custom value: -[source,javascript] - const keycloak = new Keycloak({ scope: 'offline_access' }); - -== Installing middleware - -Once instantiated, install the middleware into your connect-capable app: - -In order to do so, first we have to install Express: ----- - npm install express ----- - -then require Express in our project as outlined below: - -[source,javascript] ----- - const express = require('express'); - const app = express(); ----- - - -and configure Keycloak middleware in Express, by adding at the code below: - -[source,javascript] ----- - app.use( keycloak.middleware() ); ----- - -Last but not least, let's set up our server to listen for HTTP requests on port 3000 by adding the following code to `main.js`: - -[source,javascript] ----- - app.listen(3000, function () { - console.log('App listening on port 3000'); - }); ----- - -== Configuration for proxies - -If the application is running behind a proxy that terminates an SSL connection -Express must be configured per the link:https://expressjs.com/en/guide/behind-proxies.html[express behind proxies] guide. -Using an incorrect proxy configuration can result in invalid redirect URIs -being generated. - -Example configuration: - -[source,javascript] ----- - const app = express(); - - app.set( 'trust proxy', true ); - - app.use( keycloak.middleware() ); ----- - -== Protecting resources - -Simple authentication:: - -To enforce that a user must be authenticated before accessing a resource, -simply use a no-argument version of `keycloak.protect()`: - -[source,javascript] ----- - app.get( '/complain', keycloak.protect(), complaintHandler ); ----- - -Role-based authorization:: - -To secure a resource with an application role for the current app: - -[source,javascript] ----- - app.get( '/special', keycloak.protect('special'), specialHandler ); ----- - -To secure a resource with an application role for a *different* app: - -[source,javascript] - app.get( '/extra-special', keycloak.protect('other-app:special'), extraSpecialHandler ); - -To secure a resource with a realm role: - -[source,javascript] - app.get( '/admin', keycloak.protect( 'realm:admin' ), adminHandler ); - -Resource-Based Authorization:: - -Resource-Based Authorization allows you to protect resources, and their specific methods/actions,**** based on a set of policies defined in Keycloak, thus externalizing authorization from your application. This is achieved by exposing a `keycloak.enforcer` method which you can use to protect resources.* - -[source,javascript] ----- - app.get('/apis/me', keycloak.enforcer('user:profile'), userProfileHandler); ----- - -The `keycloak-enforcer` method operates in two modes, depending on the value of the `response_mode` configuration option. - -[source,javascript] ----- - app.get('/apis/me', keycloak.enforcer('user:profile', {response_mode: 'token'}), userProfileHandler); ----- - -If `response_mode` is set to `token`, permissions are obtained from the server on behalf of the subject represented by the bearer token that was sent to your application. 
In this case, a new access token is issued by Keycloak with the permissions granted by the server. If the server did not respond with a token with the expected permissions, the request is denied. When using this mode, you should be able to obtain the token from the request as follows: - -[source,javascript] ----- - app.get('/apis/me', keycloak.enforcer('user:profile', {response_mode: 'token'}), function (req, res) { - const token = req.kauth.grant.access_token.content; - const permissions = token.authorization ? token.authorization.permissions : undefined; - - // show user profile - }); ----- - -Prefer this mode when your application is using sessions and you want to cache previous decisions from the server, as well automatically handle refresh tokens. This mode is especially useful for applications acting as a client and resource server. - -If `response_mode` is set to `permissions` (default mode), the server only returns the list of granted permissions, without issuing a new access token. In addition to not issuing a new token, this method exposes the permissions granted by the server through the `request` as follows: - -[source,javascript] ----- - app.get('/apis/me', keycloak.enforcer('user:profile', {response_mode: 'permissions'}), function (req, res) { - const permissions = req.permissions; - - // show user profile - }); ----- - -Regardless of the `response_mode` in use, the `keycloak.enforcer` method will first try to check the permissions within the bearer token that was sent to your application. If the bearer token already carries the expected permissions, there is no need -to interact with the server to obtain a decision. This is specially useful when your clients are capable of obtaining access tokens from the server with the expected permissions before accessing a protected resource, so they can use some capabilities provided by Keycloak Authorization Services such as incremental authorization and avoid additional requests to the server when `keycloak.enforcer` is enforcing access to the resource. - -By default, the policy enforcer will use the `client_id` defined to the application (for instance, via `keycloak.json`) to - reference a client in Keycloak that supports Keycloak Authorization Services. In this case, the client can not be public given - that it is actually a resource server. - -If your application is acting as both a public client(frontend) and resource server(backend), you can use the following configuration to reference a different -client in Keycloak with the policies that you want to enforce: - -[source,javascript] ----- - keycloak.enforcer('user:profile', {resource_server_id: 'my-apiserver'}) ----- - -It is recommended to use distinct clients in Keycloak to represent your frontend and backend. - -If the application you are protecting is enabled with Keycloak authorization services and you have defined client credentials - in `keycloak.json`, you can push additional claims to the server and make them available to your policies in order to make decisions. 
-For that, you can define a `claims` configuration option which expects a `function` that returns a JSON with the claims you want to push: - -[source,javascript] ----- - app.get('/protected/resource', keycloak.enforcer(['resource:view', 'resource:write'], { - claims: function(request) { - return { - "http.uri": ["/protected/resource"], - "user.agent": // get user agent from request - } - } - }), function (req, res) { - // access granted ----- - -For more details about how to configure Keycloak to protected your application resources, please take a look at the link:{authorizationguide_link}[{authorizationguide_name}]. - -Advanced authorization:: - -To secure resources based on parts of the URL itself, assuming a role exists -for each section: - -[source,javascript] ----- - function protectBySection(token, request) { - return token.hasRole( request.params.section ); - } - - app.get( '/:section/:page', keycloak.protect( protectBySection ), sectionHandler ); ----- - -Advanced Login Configuration: - -By default, all unauthorized requests will be redirected to the {project_name} login page unless your client is bearer-only. -However, a confidential or public client may host both browsable and API endpoints. To prevent redirects on unauthenticated -API requests and instead return an HTTP 401, you can override the redirectToLogin function. - -For example, this override checks if the URL contains /api/ and disables login redirects: - -[source,javascript] ----- - Keycloak.prototype.redirectToLogin = function(req) { - const apiReqMatcher = /\/api\//i; - return !apiReqMatcher.test(req.originalUrl || req.url); - }; ----- - -== Additional URLs - -Explicit user-triggered logout:: - -By default, the middleware catches calls to `/logout` to send the user through a -{project_name}-centric logout workflow. This can be changed by specifying a `logout` -configuration parameter to the `middleware()` call: - -[source,javascript] ----- - app.use( keycloak.middleware( { logout: '/logoff' } )); ----- - -When the user-triggered logout is invoked a query parameter `redirect_url` can be passed: - -[source] ----- -https://example.com/logoff?redirect_url=https%3A%2F%2Fexample.com%3A3000%2Flogged%2Fout ----- - -This parameter is then used as the redirect url of the OIDC logout endpoint and the user will be redirected to -`\https://example.com/logged/out`. - -{project_name} Admin Callbacks:: - -Also, the middleware supports callbacks from the {project_name} console to log out a single -session or all sessions. 
By default, these type of admin callbacks occur relative -to the root URL of `/` but can be changed by providing an `admin` parameter -to the `middleware()` call: -[source,javascript] - app.use( keycloak.middleware( { admin: '/callbacks' } ); - -== Complete example - -A complete example using the Node.js adapter usage can be found in {quickstartRepo_link}/tree/latest/nodejs/resource-server[Keycloak quickstarts for Node.js] - - diff --git a/docs/guides/securing-apps/oidc-layers.adoc b/docs/guides/securing-apps/oidc-layers.adoc index d504a772a2a8..e8fc4f5a6911 100644 --- a/docs/guides/securing-apps/oidc-layers.adoc +++ b/docs/guides/securing-apps/oidc-layers.adoc @@ -2,11 +2,11 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Secure applications and services with OpenID Connect" +title="Securing applications and services with OpenID Connect" priority=20 -summary="Using OpenID Connect with Keycloak to secure applications and services"> +summary="Use OpenID Connect with {project_name} to secure applications and services."> -include::partials/oidc/available-endpoints.adoc[] +<#include "partials/oidc/available-endpoints.adoc" /> include::partials/oidc/supported-grant-types.adoc[] diff --git a/docs/guides/securing-apps/overview.adoc b/docs/guides/securing-apps/overview.adoc index e54603f91cf0..071c42acc902 100644 --- a/docs/guides/securing-apps/overview.adoc +++ b/docs/guides/securing-apps/overview.adoc @@ -4,9 +4,9 @@ <@tmpl.guide title="Planning for securing applications and services" priority=10 -summary="Introduction and basic concepts for securing applications"> +summary="Understand basic concepts for securing applications."> -As an OAuth2, OpenID Connect, and SAML compliant server, {project_name} can secure any application and service as long +As an OAuth2, OpenID Connect and SAML compliant server, {project_name} can secure any application and service as long as the technology stack they are using supports any of these protocols. For more details about the security protocols supported by {project_name}, consider looking at link:{adminguide_link}#sso-protocols[{adminguide_name}]. diff --git a/docs/guides/securing-apps/partials/oidc/available-endpoints.adoc b/docs/guides/securing-apps/partials/oidc/available-endpoints.adoc index dd551d620e61..a18dc3f260c6 100644 --- a/docs/guides/securing-apps/partials/oidc/available-endpoints.adoc +++ b/docs/guides/securing-apps/partials/oidc/available-endpoints.adoc @@ -69,6 +69,8 @@ The endpoint can also be invoked directly by the application. To invoke this end The certificate endpoint returns the public keys enabled by the realm, encoded as a JSON Web Key (JWK). Depending on the realm settings, one or more keys can be enabled for verifying tokens. For more information, see the link:{adminguide_link}[{adminguide_name}] and the https://datatracker.ietf.org/doc/html/rfc7517[JSON Web Key specification]. +For more details, see the https://openid.net/specs/openid-connect-discovery-1_0.html[OpenID Connect Discovery] specification. + [[_token_introspection_endpoint]] ==== Introspection endpoint .... 
diff --git a/docs/guides/securing-apps/partials/oidc/oauth21-support.adoc b/docs/guides/securing-apps/partials/oidc/oauth21-support.adoc index b4cbc4443baf..3639ab60768d 100644 --- a/docs/guides/securing-apps/partials/oidc/oauth21-support.adoc +++ b/docs/guides/securing-apps/partials/oidc/oauth21-support.adoc @@ -3,7 +3,7 @@ {project_name} makes it easier for administrators to make sure that their clients are compliant with these specifications: -* https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-10[The OAuth 2.1 Authorization Framework - draft specification] +* https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-13[The OAuth 2.1 Authorization Framework - draft specification] This compliance means that the {project_name} server will verify the requirements for the authorization server, which are mentioned in the specifications. {project_name} adapters do not have any specific support for the OAuth 2.1, hence the required validations on the client (application) diff --git a/docs/guides/securing-apps/partials/oidc/supported-grant-types.adoc b/docs/guides/securing-apps/partials/oidc/supported-grant-types.adoc index 9ee9e01f6bf2..b6d086b645a8 100644 --- a/docs/guides/securing-apps/partials/oidc/supported-grant-types.adoc +++ b/docs/guides/securing-apps/partials/oidc/supported-grant-types.adoc @@ -28,15 +28,15 @@ browser history. You can somewhat mitigate this problem by using short expiratio For more details, see the https://openid.net/specs/openid-connect-core-1_0.html#ImplicitFlowAuth[Implicit Flow] in the OpenID Connect specification. -Per current https://datatracker.ietf.org/doc/html/draft-ietf-oauth-security-topics#name-implicit-grant[OAuth 2.0 Security Best Current Practice], this flow should not be used. -This flow is removed from the future https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-09[OAuth 2.1 specification]. +Per current https://datatracker.ietf.org/doc/html/rfc9700#name-implicit-grant[Best Current Practice for OAuth 2.0 Security (RFC 9700)], this flow SHOULD NOT be used. +This flow is removed from the future https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-13[OAuth 2.1 specification]. [[_resource_owner_password_credentials_flow]] === Resource Owner Password Credentials Resource Owner Password Credentials, referred to as Direct Grant in {project_name}, allows exchanging user credentials for tokens. -Per current https://datatracker.ietf.org/doc/html/draft-ietf-oauth-security-topics#name-resource-owner-password-cre[OAuth 2.0 Security Best Practices], -this flow should not be used, preferring alternative methods such as <> or <>. +Per current https://datatracker.ietf.org/doc/html/rfc9700#name-resource-owner-password-cre[Best Current Practice for OAuth 2.0 Security (RFC 9700)], +this flow MUST NOT be used, preferring alternative methods such as <> or <>. The limitations of using this flow include: @@ -56,7 +56,7 @@ Security concerns with this flow include: For a client to be permitted to use the Resource Owner Password Credentials grant, the client has to have the `Direct Access Grants Enabled` option enabled. This flow is not included in OpenID Connect, but is a part of the OAuth 2.0 specification. -It is removed from the future https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-09[OAuth 2.1 specification]. +It is removed from the future https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-13[OAuth 2.1 specification]. 
For more details, see the https://datatracker.ietf.org/doc/html/rfc6749#section-4.3[Resource Owner Password Credentials Grant] chapter in the OAuth 2.0 specification. diff --git a/docs/guides/securing-apps/partials/overview/getting-started.adoc b/docs/guides/securing-apps/partials/overview/getting-started.adoc index 5435b319dead..dc687aaec99d 100644 --- a/docs/guides/securing-apps/partials/overview/getting-started.adoc +++ b/docs/guides/securing-apps/partials/overview/getting-started.adoc @@ -11,15 +11,15 @@ Connect and SAML protocols. ifeval::[{project_community}==true] ==== Java -* {quickstartRepo_link}/tree/latest/jakarta/servlet-authz-client[Wildfly Elytron OIDC] -* {quickstartRepo_link}/tree/latest/spring/rest-authz-resource-server[Spring Boot] +* {quickstartRepo_link}/tree/main/jakarta/servlet-authz-client[Wildfly Elytron OIDC] +* {quickstartRepo_link}/tree/main/spring/rest-authz-resource-server[Spring Boot] endif::[] ==== JavaScript (client-side) -* <@links.securingapps id="javascript-adapter"/> +* https://www.keycloak.org/securing-apps/javascript-adapter[Keycloak JS adapter] ==== Node.js (server-side) -* <@links.securingapps id="nodejs-adapter"/> +* https://www.keycloak.org/securing-apps/nodejs-adapter[Keycloak Node.js adapter] ifeval::[{project_community}==true] ==== C# diff --git a/docs/guides/securing-apps/saml-galleon-layers-detailed-config.adoc b/docs/guides/securing-apps/saml-galleon-layers-detailed-config.adoc index ebfa1bd6b2ea..7495aeda40b0 100644 --- a/docs/guides/securing-apps/saml-galleon-layers-detailed-config.adoc +++ b/docs/guides/securing-apps/saml-galleon-layers-detailed-config.adoc @@ -5,7 +5,7 @@ title="{project_name} SAML Galleon feature pack detailed configuration" priority=70 tileVisible="false" -summary="Detailed list of elements for the `keycloak-saml.xml` configuration file"> +summary="Review this detailed list of elements for the `keycloak-saml.xml` configuration file."> This {section} contains the detailed list of elements for the `keycloak-saml.xml` configuration file used by the {project_name} SAML Galleon feature pack. diff --git a/docs/guides/securing-apps/saml-galleon-layers.adoc b/docs/guides/securing-apps/saml-galleon-layers.adoc index 3726b5b55899..4d319c84e8ad 100644 --- a/docs/guides/securing-apps/saml-galleon-layers.adoc +++ b/docs/guides/securing-apps/saml-galleon-layers.adoc @@ -4,7 +4,7 @@ <@tmpl.guide title="{project_name} SAML Galleon feature pack for WildFly and EAP" priority=60 -summary="Using {project_name} SAML Galleon feature pack to secure applications in WildFly and EAP"> +summary="Using {project_name} SAML Galleon feature pack to secure applications in WildFly and EAP."> The SAML adapter is distributed as a Galleon feature pack for wildfly 29 or newer. More details about the subject in the https://docs.wildfly.org/32/WildFly_Elytron_Security.html#Keycloak_SAML_Integration[WildFly documentation]. 
diff --git a/docs/guides/securing-apps/token-exchange.adoc b/docs/guides/securing-apps/token-exchange.adoc
index a49dfe81caa0..05eea3086123 100644
--- a/docs/guides/securing-apps/token-exchange.adoc
+++ b/docs/guides/securing-apps/token-exchange.adoc
@@ -2,9 +2,327 @@
 <#import "/templates/links.adoc" as links>
 
 <@tmpl.guide
-title="Using token exchange"
+title="Configuring and using token exchange"
 priority=120
-summary="Configuring and using Token exchange with {project_name}">
+summary="Configure and use token exchange for {project_name}.">
+
+Token exchange is the process that allows a client application to exchange one token for another token. In {project_name}, two features implement token exchange:
+
+* <<_standard-token-exchange,Standard token exchange: version 2 (V2)>> - This feature is the fully supported token exchange implementation that is enabled by default once the {project_name} server is started.
+* <<_legacy-token-exchange,Legacy token exchange: version 1 (V1)>> - This preview feature is not enabled by default once the {project_name} server is started.
+
+The capabilities of {project_name} for token exchange are as follows:
+
+. A client can exchange an existing {project_name} token created for a specific client for a new token targeted to a different client in the same realm.
+. A client can exchange an existing {project_name} token for an external token, such as a linked Facebook account.
+. A client can exchange an external token for a {project_name} token.
+. A client can impersonate a user.
+
+The standard token exchange supports only use case (1). The legacy token exchange supports all four use cases, but it is a preview feature. Therefore, the standard token exchange V2 is recommended since it is supported and will be maintained in the future. The legacy token exchange is useful for the last three use cases, but it may not be
+backwards compatible with future {project_name} versions. You can also enable both token exchange features and use them together. For example, you could use the internal-internal exchange
+provided by V2 together with other use cases that are supported by V1. For more details, see this <<_standard-token-exchange-comparison,token exchange comparison>>.
+
+NOTE: If you still need the legacy token exchange feature, you also need link:{adminguide_link}#fine-grained-admin-permissions-v1[Fine-grained admin permissions version 1] (FGAP:v1) enabled because
+link:{adminguide_link}#_fine_grained_permissions[version 2 (FGAP:v2)] does not have support for token exchange permissions. This is on purpose because
+token-exchange is conceptually not really an "admin" permission and hence there is no plan to add token exchange permissions to FGAP:v2.
+
+[[_standard-token-exchange]]
+== Standard token exchange
+
+Standard token exchange in {project_name} implements the https://datatracker.ietf.org/doc/html/rfc8693[Token exchange specification]. It allows a client application to exchange an existing {project_name} token created
+for a specific client for a new token issued to the client that triggered the token exchange request. Both clients must be in the same realm.
+
+[[_standard-token-exchange-flow]]
+=== Token exchange flow
+
+Consider this typical token exchange flow:
+
+. The user authenticates with {project_name} SSO to the client application `initial-client`. The token is issued to the `initial-client`.
+. The client `initial-client` may need to use the REST service `requester-client`, which requires authentication. So the `initial-client` sends the access token from step 1 to the `requester-client` along with
+the request.
+. To serve the request, the `requester-client` may need to call another service `target-client`. However, it may be unable to use the token sent to it by `initial-client`. For example:
+* The token has insufficient permissions or scopes.
+* The `target-client` is not specified as the token audience; the token was intended to be used to invoke `requester-client`.
+* The token has too many permissions; therefore, the `requester-client` may not want to share it with the `target-client`.
++
+Any of these situations could be the reason to invoke the token exchange. The `requester-client` may need to send the token exchange request to the {project_name} server and use the original token from step 1 as the
+_subject token_ and exchange it for another token, the _requested token_.
+. The _requested token_ is returned to `requester-client`. This token can now be sent to the `target-client`.
+. The `target-client` can fulfill the request and return the response to the `requester-client`. The `requester-client` can then return the response to the request from step 2.
+
+Many other use cases exist for token exchange, but the preceding example is the most typical.
+
+==== Example token exchange request
+
+The following is an example token exchange request from the client `requester-client` in the realm `test`. Note that `subject_token` is the access token issued to the `initial-client`:
+
+[source,bash]
+----
+POST /realms/test/protocol/openid-connect/token
+Authorization: Basic cmVxdWVzdGVyLWNsaWVudDpwYXNzd29yZA==
+Content-Type: application/x-www-form-urlencoded
+Accept: application/json
+
+grant_type=urn:ietf:params:oauth:grant-type:token-exchange&
+subject_token=$SUBJECT_TOKEN&
+subject_token_type=urn:ietf:params:oauth:token-type:access_token&
+requested_token_type=urn:ietf:params:oauth:token-type:access_token
+----
+
+The example token exchange response may look like this:
+
+[source,json]
+----
+{
+  "access_token": "eyJhbGciOiJSUzI1NiIsIn...",
+  "expires_in": 300,
+  "token_type": "Bearer",
+  "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+  "session_state": "287f3c57-32b8-4c0f-8b00-8c7db231d701",
+  "scope": "default-scope1",
+  "refresh_expires_in": 0,
+  "not-before-policy": 0
+}
+----
+
+[[_standard-token-exchange-enable]]
+=== How to enable token exchange
+
+For standard token exchange, `token-exchange-standard:v2` is enabled by default. However, you also need to enable the *Standard token exchange* switch for
+the client that is supposed to send token exchange requests, such as the `requester-client` from the <<_standard-token-exchange-flow,previous example>>. Note that `requester-client` must be a confidential client.
+Also, as is the case for other grant requests, the token exchange requests must be authenticated by the appropriate link:{adminguide_link}#_client-credentials[client authentication method] that is configured
+for the client.
+
+.Enabling Token Exchange
+image::token-exchange-switch.png[Enabling Token Exchange]
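+
+With the switch enabled, the `requester-client` can send the exchange request shown earlier. The following curl command is a minimal sketch of that request; it assumes a local server at `http://localhost:8080`, the realm `test`, and that `requester-client` authenticates with a client secret (`$SUBJECT_TOKEN` is a placeholder for the access token issued to `initial-client`):
+
+[source,bash]
+----
+# Sketch of the example token exchange request, with placeholder values
+curl -X POST \
+    -d "client_id=requester-client" \
+    -d "client_secret=password" \
+    -d "grant_type=urn:ietf:params:oauth:grant-type:token-exchange" \
+    -d "subject_token=$SUBJECT_TOKEN" \
+    -d "subject_token_type=urn:ietf:params:oauth:token-type:access_token" \
+    -d "requested_token_type=urn:ietf:params:oauth:token-type:access_token" \
+    http://localhost:8080/realms/test/protocol/openid-connect/token
+----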
+
+[[_standard-token-exchange-request]]
+=== Request and response parameters
+
+The parameters are aligned with the https://datatracker.ietf.org/doc/html/rfc8693#name-token-exchange-request-and-[Token exchange specification] and are described as follows:
+
+grant_type::
+  _REQUIRED._ The value of the parameter must be `urn:ietf:params:oauth:grant-type:token-exchange`.
+
+subject_token::
+  _REQUIRED._ A security token that represents the identity of the party on behalf of whom the request is being made.
+
+subject_token_type::
+  _REQUIRED._ This parameter is the type of the token passed in the `subject_token` parameter. This must be `urn:ietf:params:oauth:token-type:access_token` when the standard token exchange is being used
+  because {project_name} does not support other types for the standard token exchange.
+
+requested_token_type::
+  _OPTIONAL._ This parameter represents the type of token that the client wants to exchange for. In this version, only OAuth and OpenID Connect token types are supported. The default value for this
+  is `urn:ietf:params:oauth:token-type:access_token`. Another possible value is `urn:ietf:params:oauth:token-type:id_token` if the ID token issued to `requester-client` is requested. The value
+  `urn:ietf:params:oauth:token-type:refresh_token` is also possible; in this case, you will receive both an access token and a refresh token within the response. However, the refresh token is allowed only if the `Allow refresh token in Standard Token Exchange` client configuration option is enabled, as specified in the <<_standard-token-exchange-details,standard token exchange>> section.
+
+scope::
+  _OPTIONAL._ This parameter represents the space-delimited set of OAuth and OpenID Connect scopes that the client is requesting. You can use link:{adminguide_link}#_client_scopes[Optional client scopes]
+  of the `requester-client`. For more details, see <<_standard-token-exchange-scope,scopes and audiences>>. Omitting this parameter means that only
+  the link:{adminguide_link}#_client_scopes[Default client scopes] are effectively used.
+
+audience::
+  _OPTIONAL._ Audience specifies the `client_id` of the client that is supposed to be used as the token audience. In <<_standard-token-exchange-flow,the example above>>, it could be `target-client`. Multiple
+  values of this parameter are allowed, which means that the token will contain multiple audiences to be used by `requester-client` with multiple different services. For example,
+  `audience=target-client1&audience=target-client2` can be used in the request. More details are in <<_standard-token-exchange-scope,the section about scopes and audiences>>.
+
+A successful response is returned in the JSON format. It contains parameters similar to the responses from other grants. The following are some token exchange specifics of the more notable parameters:
+
+access_token::
+  The requested access token. Note that if the request specified `requested_token_type=urn:ietf:params:oauth:token-type:id_token`, this parameter may actually contain the ID token instead of an access token.
+  This behavior is per https://datatracker.ietf.org/doc/html/rfc8693#section-2.2.1[the token exchange specification].
+
+refresh_token::
+  The refresh token. It is included only if `requested_token_type=urn:ietf:params:oauth:token-type:refresh_token` is used and the client has enabled issuing refresh tokens from the token exchange.
+
+issued_token_type::
+  The issued requested token type. Same value as the `requested_token_type` used in the request.
+
+token_type::
+  Usually `Bearer` if the issued token type was an access token or refresh token. If an ID token was requested, the value is `N_A`.
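+
+For example, a request body asking for an ID token instead of the default access token differs only in the `requested_token_type` value. This is a sketch that reuses the placeholders from the earlier example:
+
+[source,bash]
+----
+grant_type=urn:ietf:params:oauth:grant-type:token-exchange&
+subject_token=$SUBJECT_TOKEN&
+subject_token_type=urn:ietf:params:oauth:token-type:access_token&
+requested_token_type=urn:ietf:params:oauth:token-type:id_token
+----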
+
+[[_standard-token-exchange-scope]]
+=== Scopes and audiences
+
+The `scope` parameter in the token exchange request has the same meaning as in other grants. This parameter is optional. When it is omitted, the effective client scopes used in the request are
+the link:{adminguide_link}#_client_scopes[Default client scopes] of the `requester-client`. When this parameter is used, the effective client scopes are the default scopes together with
+the requested link:{adminguide_link}#_client_scopes[Optional client scopes].
+
+By default, the used client scopes add audiences to the `aud` claim of the token based on the used client scopes and client roles, as specified in the link:{adminguide_link}#audience-support[Audience documentation].
+
+The `audience` parameter can be used for filtering of audiences, so that the `aud` claim will contain only the audiences specified by the `audience` parameter. Similarly, the client roles in the token will
+be filtered and the token will have only the client roles of the clients specified by the `audience` parameter.
+
+In addition, the `audience` parameter can be used to potentially filter client scopes as well. It works in a manner similar to link:{adminguide_link}#client-scopes-permissions[Client scope permission for users].
+If the client scope does not contain any client roles (for example, it contains zero roles or it contains only realm roles), no additional filtering occurs for client scopes. However, if the client scope contains any
+client role mappings, it must include some client roles of the clients requested by the `audience` parameter. Composite roles are also included for consideration. If the client scope contains no client
+roles of the clients requested by the `audience`, the client scope will be filtered out.
+
+NOTE: The `audience` parameter can be used to filter the audiences that are coming from the used client scopes. However, this parameter will not add more audiences. When the audience parameter is omitted,
+no filtering occurs. As a result, the `audience` parameter is effectively used for "downscoping" the token to make sure that it contains only the requested audiences. However, the `scope` parameter is used
+to add optional client scopes and hence it can be used for "upscoping" and adding more scopes.
+
+==== Examples
+
+Here are some examples to better illustrate the behavior for scopes and audiences.
+
+Assume a realm with:
+
+* Client `target-client1` with the client role `target-client1-role`
+
+* Client `target-client2` with the client role `target-client2-role`
+
+* Client `target-client3` with the client role `target-client3-role`
+
+* Client scope `default-scope1`. This client scope has a role scope mapping for the client role `target-client1/target-client1-role`
+
+* Client scope `optional-scope2`. This client scope has a role scope mapping for the client role `target-client2/target-client2-role`
+
+* Client `requester-client`, which has the client scope `default-scope1` added as a default client scope and the scope `optional-scope2` added as an optional client scope
+
+* An authenticated user, who is a member of both `target-client1-role` and `target-client2-role`
+
+The settings above mean that using the scope `default-scope1` will add the audience `target-client1` to the token and using `optional-scope2` will add the audience `target-client2`. This is because of the
+audience resolving described in the link:{adminguide_link}#_audience_resolve[Audience documentation].
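+
+The examples below show the resulting tokens for different combinations of these parameters. For reference, a request matching Example 2 (`scope=optional-scope2` together with `audience=target-client2`) could be sent as in the following sketch, again assuming client-secret authentication and placeholder values; Example 3 would differ only by sending the `audience` parameter twice:
+
+[source,bash]
+----
+# Sketch of a token exchange request that combines the scope and audience parameters
+curl -X POST \
+    -d "client_id=requester-client" \
+    -d "client_secret=password" \
+    -d "grant_type=urn:ietf:params:oauth:grant-type:token-exchange" \
+    -d "subject_token=$SUBJECT_TOKEN" \
+    -d "subject_token_type=urn:ietf:params:oauth:token-type:access_token" \
+    -d "scope=optional-scope2" \
+    -d "audience=target-client2" \
+    http://localhost:8080/realms/test/protocol/openid-connect/token
+----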
+
+===== Example 1
+
+Token exchange request sent with `scope=optional-scope2` and without the audience parameter:
+
+There will be no filtering of audiences. The scopes and audiences will be resolved as is the case for any other grant, as described in the link:{adminguide_link}#_client_scopes[Client scopes] and
+link:{adminguide_link}#_audience_resolve[Audience documentation] sections. The response token will be similar to this (claims not interesting for this example are omitted for brevity):
+
+[source,json]
+----
+{
+  "azp": "requester-client",
+  "scope": "default-scope1 optional-scope2",
+  "aud": [ "target-client1", "target-client2" ],
+  "resource_access": {
+    "target-client1": {
+      "roles": [ "target-client1-role" ]
+    },
+    "target-client2": {
+      "roles": [ "target-client2-role" ]
+    }
+  },
+  ...
+}
+----
+
+===== Example 2
+
+Token exchange request sent with `scope=optional-scope2` and with `audience=target-client2`
+
+Same as the previous example, but the `target-client1` audience and client roles are filtered out because the `audience` parameter was included and requested only the `target-client2` client. The client scope `default-scope1` is
+also filtered out because it contains some client roles, but at the same time, it does not contain any client roles of the requested audience client `target-client2`. So the token would look like this:
+
+[source,json]
+----
+{
+  "azp": "requester-client",
+  "scope": "optional-scope2",
+  "aud": [ "target-client2" ],
+  "resource_access": {
+    "target-client2": {
+      "roles": [ "target-client2-role" ]
+    }
+  },
+  ...
+}
+----
+
+===== Example 3
+
+Token exchange request sent with `scope=optional-scope2` and with `audience=target-client2&audience=target-client3`
+
+The `target-client3` is not part of the token audience because the user does not have any of its roles. So in this case, the request will be rejected because some of the requested audiences are not available.
+
+NOTE: As mentioned in the token exchange specification, it is good practice to downscope the token as much as possible and use only the audiences needed. Ideally, use a single audience. This strategy increases the probability
+that the request will be allowed.
+
+NOTE: If you have a more complex deployment with many different scopes and audiences, it can be challenging to model it in an appropriate way. Consider using the link:{adminguide_link}#_client_scopes_evaluate[Client scopes evaluate tab]
+to test whether the token looks as expected for the given user and for the given set of scopes and audiences.
+
+[[_standard-token-exchange-details]]
+=== Token exchange - Additional details
+
+These additional points clarify the behavior of token exchange.
+
+* Public clients are not supported for sending token exchange requests. V1 had some very limited support for public clients, where a public client could exchange its own token for one with fewer scopes.
+This use case can be replaced by the refresh token grant.
+
+* The `subject_token` sent to the token exchange endpoint must have the requester client set as an audience in the `aud` claim. Otherwise, the request would be rejected. The only exception is when a client
+exchanges its own token, which was issued to it. Exchanging a token to itself might be useful to downscope or upscope the token, or to filter out unneeded token audiences.
+
+* Consents - If the requester client has *Consent required* enabled, the token exchange is allowed only if the user has already granted consent to all requested scopes.
+
+* link:{adminguide_link}#_fine_grain_permissions[Fine-grained admin permissions (FGAP)] are not needed for the standard token exchange. We plan to eventually integrate with FGAP in the future, but that
+integration might be available to all grants.
+It will not be specific only to token exchange as it was in token exchange V1.
+
+* Integrating token exchange with link:{adminguide_link}#_client_policies[Client policies] is possible. This integration can be useful to address certain use cases. For example, consider the use case of rejecting the token exchange request if the
+client `requester-client` sends the request with `scope=some-confidential-scope`. In this example, it can be useful to create a client policy condition with combined conditions
+for `client-scope`, `grant-type` and `client-roles`.
+
+* Requesting a refresh token is allowed only if the client has the switch *Allow refresh token in Standard Token Exchange* set to a value other than `No` (the default value). The switch is available in the
+Admin Console in the *Advanced* tab of the OIDC client in the *OpenID Connect Compatibility Modes* section. The other available value of the switch is *Same session*, which means that the refresh token is
+allowed only if the refresh token can use the same user session as the subject token. If that subject token is coming from a link:{adminguide_link}#_transient-session[Transient session] or from an
+link:{adminguide_link}#_offline-access[Offline session], requesting a refresh token is not allowed. Similarly, it is not allowed to request an offline token (using `scope=offline_access`).
+
+.Enabling refresh token in Token Exchange
+image::token-exchange-switch-refresh.png[Enabling refresh token in Token Exchange]
+
+* Token exchange never creates a new link:{adminguide_link}#managing-user-sessions[user session]. If `requested_token_type` is a refresh token, it may create a new client session in the user session
+for the requester client (if the client session was not yet created).
+
+* {project_name} token exchange does not yet have support for the `resource` parameter.
+
+* The token exchange specification mentions the concepts of https://datatracker.ietf.org/doc/html/rfc8693#name-delegation-vs-impersonation[impersonation and delegation]. {project_name} has support for the
+impersonation use case, but not yet for the delegation use case.
+
+==== Revocation
+
+Assuming that there is a subject token `access-token1` issued to the client `initial-client`, here are some considerations related to token revocation:
+
+* For the case when `access-token1` was exchanged for `access-token2` of the client `requester-client`, the revocation of `access-token1` will not revoke `access-token2`. Supporting a "revocation chain" for access
+tokens would add significant overhead. Considering this, the administrator must ensure that access tokens are short-lived and are revoked automatically after some time.
+
+* For the case when `access-token1` was exchanged for `refresh-token2` of the client `requester-client`, {project_name} tries to support a revocation chain. This means that:
+  ** Revocation of `access-token1` will also revoke `refresh-token2`. Moreover, this removes the client session of the client `requester-client` from the
+     user session and hence all refresh tokens of `requester-client` in this user session are effectively revoked.
+  ** If `refresh-token2` and its related access token were used for a further token exchange to a different client, then revocation of `access-token1` will revoke those subsequent token exchanges
+     as well. In other words, the whole "chain" of exchanged tokens is revoked.
+  ** Note that the access token should be valid when the revocation endpoint is invoked (see the sketch after this list). If you do not have a valid access token when the original `access-token1` has expired, you can potentially use another
+     access token issued to the same client in the same user session. The exchanged tokens such as `refresh-token2` and others from the "chain" will still be revoked.
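+
+The considerations above refer to invoking the revocation endpoint of the realm with the token to revoke. As a reference point, revoking `access-token1` could look like the following sketch, which assumes that `initial-client` is a confidential client authenticating with a client secret and that all values are placeholders:
+
+[source,bash]
+----
+# Sketch: revoking the subject token access-token1 via the OAuth 2.0 revocation endpoint
+curl -X POST \
+    -d "client_id=initial-client" \
+    -d "client_secret=password" \
+    -d "token=$ACCESS_TOKEN_1" \
+    http://localhost:8080/realms/test/protocol/openid-connect/revoke
+----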
+
+[[_standard-token-exchange-comparison]]
+=== Comparison of standard token exchange and legacy token exchange
+
+While the preceding sections fully detail standard and legacy token exchange, the following is an overall summary that compares the two token exchange methods.
+
+[cols="3*", options="header"]
+|===
+|Capability |Standard token exchange V2 |Legacy token exchange V1
+s|Internal-internal token exchange | Supported. Implemented as per RFC 8693 | Preview support. Loose implementation of RFC 8693. It is recommended to use V2 instead
+s|Allowed `subject_token_type` | Access token type only | Access token type only for internal-internal, JWT for external-internal scenarios
+s|Allowed `requested_token_type` | Access token (default), Refresh token, ID token | Access token, Refresh token (default), SAML2 assertion
+s|Behavior of `scope` parameter | Aligned with other grants. Scope parameter requests optional scopes of the client that sent the token exchange request | Scope parameter based on scopes of
+the "target" client specified by the audience parameter. Downscoping support only
+s|Behavior of `audience` parameter | Support for more values as per the specification. Can be used to narrow down the available audiences and keep only the requested audiences. Effectively downscoping the token per
+the required target audience | Support for a single audience value. Token effectively issued to the client requested by the audience parameter and using scopes of that client
+s|Public clients | Not available. Downscoping implemented by V1 can be replaced by the refresh token grant | Available only to exchange the token of the client itself. Effectively downscoping support only
+s|Consents | Allowed for clients with *Consent required* as long as the user has already granted consent | Not allowed for clients with *Consent required*
+s|Authorization | Verification that the requester client must be in the audience of the `subject_token`. Integration with client policies. No Fine-grained admin permissions | Based on fine-grained admin permissions version 1
+s|Revocation chain | Not available for access tokens. Available for refresh tokens | Not available for access or refresh tokens
+s|Delegation per RFC 8693|Not supported yet|Not supported
+s|Resource parameter per RFC 8693|Not supported yet|Not supported
+s|Federated token exchange | Not implemented yet | Implemented as a preview
+s|Subject impersonation (including direct naked impersonation) | Not implemented yet | Implemented as a preview
+|===
+
+[[_legacy-token-exchange]]
+== Legacy token exchange
Confidentia to pass their client id and secret, Basic Auth, or however your admin has configured the client authentication flow in your realm. -=== Form parameters +==== Form parameters client_id:: _REQUIRED MAYBE._ This parameter is required for clients using form parameters for authentication. If you are using @@ -97,7 +415,7 @@ scope:: NOTE: We currently only support OpenID Connect and OAuth exchanges. Support for SAML based clients and identity providers may be added in the future depending on user demand. -=== Responses from a token exchange request +==== Responses from a token exchange request A successful response from an exchange invocation will return the HTTP 200 response code with a content type that depends on the `requested-token-type` and `requested_issuer` the client asks for. OAuth requested token types will return @@ -140,7 +458,10 @@ For simplicity's sake, let's call a token minted by the current realm as an _int an external realm or identity provider as an _external_ token. [[_internal-token-to-internal-token-exchange]] -== Internal token to internal token exchange +=== Internal token to internal token exchange + +NOTE: For internal token to internal token exchange, it is recommended to use <<_standard-token-exchange,Standard token exchange>> instead of using the legacy token exchange flow described below. +Standard token exchange is officially supported. With an internal token to token exchange you have an existing token minted to a specific client and you want to exchange this token for a new one minted for a different target client. Why would you want to do this? This generally happens @@ -150,7 +471,7 @@ need to perform a "permission downgrade" where your app needs to invoke on a les to propagate your current access token. [[_client_to_client_permission]] -=== Granting permission for the exchange +==== Granting permission for the exchange Clients that want to exchange tokens for a different client need to be authorized in the Admin Console. You need to define a `token-exchange` fine grain permission in the target client you want permission to exchange to. @@ -195,7 +516,7 @@ Your client now has permission to invoke. If you do not do this correctly, you try to make an exchange. [[_internal_internal_making_request]] -=== Making the request +==== Making the request When your client is exchanging an existing token for a token targeting another client, you use the `audience` parameter. This parameter must be the client identifier for the target client that you configured in the Admin Console. @@ -234,7 +555,7 @@ the client making the request to successfully complete the exchange. } ---- -== Internal token to external token exchange +=== Internal token to external token exchange You can exchange a realm token for an external token minted by an external identity provider. This external identity provider must be configured within the `Identity Provider` section of the Admin Console. Currently only OAuth/OpenID Connect based external @@ -253,7 +574,7 @@ If the account is not linked, the exchange response will contain a link you can discussed more in the <<_internal_external_making_request, Making the Request>> section. [[_grant_permission_external_exchange]] -=== Granting permission for the exchange +==== Granting permission for the exchange Internal to external token exchange requests will be denied with a 403, Forbidden response until you grant permission for the calling client to exchange tokens with the external identity provider. 
To grant permission to the client, you go to the identity provider's configuration page to the *Permissions* tab. @@ -293,7 +614,7 @@ image::exchange-idp-apply-policy.png[Apply Client Policy] Your client now has permission to invoke. If you do not do this correctly, you will get a 403 Forbidden response if you try to make an exchange. [[_internal_external_making_request]] -=== Making the request +==== Making the request When your client is exchanging an existing internal token to an external one, you provide the `requested_issuer` parameter. The parameter must be the alias of a configured identity provider. @@ -340,7 +661,8 @@ so that the client can perform link:{developerguide_link}[Client Initiated Accou providers require linking through browser OAuth protocol. With the `account-link-url` just add a `redirect_uri` query parameter to it and you can forward browsers to perform the link. -== External token to internal token exchange +[[_external-token-to-internal-token-exchange]] +=== External token to internal token exchange You can trust and exchange external tokens minted by external identity providers for internal tokens. This can be used to bridge between realms or just to trust tokens from your social provider. It works similarly to an identity provider @@ -359,7 +681,7 @@ These types of changes required a configured identity provider in the Admin Cons NOTE: SAML identity providers are not supported at this time. Twitter tokens cannot be exchanged either. -=== Granting permission for the exchange +==== Granting permission for the exchange Before external token exchanges can be done, you grant permission for the calling client to make the exchange. This permission is granted in the same manner as <<_grant_permission_external_exchange, internal to external permission is granted>>. @@ -368,7 +690,7 @@ If you also provide an `audience` parameter whose value points to a different cl must also grant the calling client permission to exchange to the target client specific in the `audience` parameter. How to do this is <<_client_to_client_permission, discussed earlier>> in this section. -=== Making the request +==== Making the request The `subject_token_type` must either be `urn:ietf:params:oauth:token-type:access_token` or `urn:ietf:params:oauth:token-type:jwt`. If the type is `urn:ietf:params:oauth:token-type:access_token` you specify the `subject_issuer` parameter and it must be the @@ -411,21 +733,24 @@ an example JSON response you get back from this call. ---- -== Impersonation +=== Impersonation For internal and external token exchanges, the client can request on behalf of a user to impersonate a different user. For example, you may have an admin application that needs to impersonate a user so that a support engineer can debug a problem. +NOTE: The impersonation scenario mentioned here is different from the https://datatracker.ietf.org/doc/html/rfc8693#name-delegation-vs-impersonation[impersonation concept of the token exchange specification]. +The specification does not support impersonating the token subject to different subject. The specification semantics rather means "impersonating the client" instead of "impersonating the user". + -=== Granting permission for the exchange +==== Granting permission for the exchange The user that the subject token represents must have permission to impersonate other users. See the link:{adminguide_link}[{adminguide_name}] on how to enable this permission. It can be done through a role or through fine grain admin permissions. 
-=== Making the request +==== Making the request Make the request as described in other chapters except additionally specify the `requested_subject` parameter. The value of this parameter must be a username or user id. @@ -443,7 +768,7 @@ curl -X POST \ http://localhost:8080{kc_realms_path}/myrealm/protocol/openid-connect/token ---- -== Direct Naked Impersonation +=== Direct Naked Impersonation You can make an internal token exchange request without providing a `subject_token`. This is called a direct naked impersonation because it places a lot of trust in a client as that client can impersonate any user in the realm. @@ -454,7 +779,7 @@ is able to authenticate users itself, but not able to obtain a token. WARNING: It is very risky to enable direct naked impersonation for a client. If the client's credentials are ever stolen, that client can impersonate any user in the system. -=== Granting permission for the exchange +==== Granting permission for the exchange If the `audience` parameter is provided, then the calling client must have permission to exchange to the client. How to set this up is discussed earlier in this chapter. @@ -504,7 +829,7 @@ try to make this type of exchange. NOTE: Public clients are not allowed to do direct naked impersonations. -=== Making the request +==== Making the request To make the request, simply specify the `requested_subject` parameter. This must be the username or user id of a valid user. You can also specify an `audience` parameter if you wish. @@ -519,14 +844,14 @@ curl -X POST \ http://localhost:8080{kc_realms_path}/myrealm/protocol/openid-connect/token ---- -== Expand permission model with service accounts +=== Expand permission model with service accounts When granting clients permission to exchange, you don't necessarily manually enable those permissions for each and every client. If the client has a service account associated with it, you can use a role to group permissions together and assign exchange permissions by assigning a role to the client's service account. For example, you might define a `naked-exchange` role and any service account that has that role can do a naked exchange. -== Exchange vulnerabilities +=== Exchange vulnerabilities When you start allowing token exchanges, there are various things you have to both be aware of and careful of. 
diff --git a/docs/guides/server/all-config.adoc b/docs/guides/server/all-config.adoc index d105c15b4612..ab1e5f3244bd 100644 --- a/docs/guides/server/all-config.adoc +++ b/docs/guides/server/all-config.adoc @@ -3,7 +3,7 @@ <@template.guide title="All configuration" -summary="Complete list of all build options and configuration for {project_name}"> +summary="Review build options and configuration for {project_name}."> <#list ctx.options.categories as category> <#assign categoryOptions=ctx.options.getValues(category)> diff --git a/docs/guides/server/all-provider-config.adoc b/docs/guides/server/all-provider-config.adoc index 9f0f86821998..1b8f7fff7541 100644 --- a/docs/guides/server/all-provider-config.adoc +++ b/docs/guides/server/all-provider-config.adoc @@ -3,7 +3,7 @@ <@template.guide title="All provider configuration" -summary="Complete list of all the available provider configuration options"> +summary="Review provider configuration options."> <#list ctx.options.getProviderOptions() as spi, providers> == ${spi} diff --git a/docs/guides/server/bootstrap-admin-recovery.adoc b/docs/guides/server/bootstrap-admin-recovery.adoc index ddb9f354b2b1..45a8a962c2c6 100644 --- a/docs/guides/server/bootstrap-admin-recovery.adoc +++ b/docs/guides/server/bootstrap-admin-recovery.adoc @@ -3,8 +3,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Admin bootstrap and recovery" -summary="Learn how to bootstrap and recover admin account."> +title="Bootstrapping and recovering an admin account" +summary="Bootstrap {project_name} and recover access by creating a temporary admin account."> == A temporary admin account @@ -31,7 +31,7 @@ Additionally, it is strongly recommended to use the dedicated command with the s If you have built an optimized version of {project_name} with the `build` command as outlined in <@links.server id="configuration"/>, use the command line option `--optimized` to have {project_name} skip the build check for a faster startup time. When doing this, remove the build time options from the command line and keep only the runtime options. -NOTE: if you do not use `--optimized` keep in mind that an `bootstrap-admin` command will implicitly create or update an optimized image for you - if you are running the command from the same machine as a server instance, this may impact the next start of your server. +NOTE: if you do not use `--optimized` keep in mind that an `bootstrap-admin` command may implicitly create or update an optimized build for you - if you are running the command from the same machine as a server instance, this may impact the next start of your server. === Create an admin user diff --git a/docs/guides/server/caching.adoc b/docs/guides/server/caching.adoc index 3f1df6a761f9..be308a8eeb0e 100644 --- a/docs/guides/server/caching.adoc +++ b/docs/guides/server/caching.adoc @@ -5,8 +5,7 @@ <@tmpl.guide title="Configuring distributed caches" -summary="Understand how to configure the caching layer" -includedOptions="cache cache-*"> +summary="Configure the caching layer to cluster multiple {project_name} instances and to increase performance."> {project_name} is designed for high availability and multi-node clustered setups. The current distributed cache implementation is built on top of https://infinispan.org[Infinispan], a high-performance, distributable in-memory data grid. 
@@ -40,6 +39,7 @@ You configure these caches in `conf/cache-ispn.xml`: |users|Local|Cache persisted user data |authorization|Local|Cache persisted authorization data |keys|Local|Cache external public keys +|crl|Local|Cache for X.509 authenticator CRLs |work|Replicated|Propagate invalidation messages across nodes |authenticationSessions|Distributed|Caches authentication sessions, created/destroyed/expired during the authentication process |sessions|Distributed|Cache persisted user session data @@ -108,36 +108,41 @@ CPU, memory, and network utilization. These in-memory caches for user sessions and client sessions are limited to, by default, 10000 entries per node which reduces the overall memory usage of {project_name} for larger installations. The internal caches will run with only a single owner for each cache entry. -Consider trade-off between memory consumption and the database utilization and set different sizes for the caches, edit {project_name}'s cache config file (`conf/cache-ispn.xml`) to set a `++` for those caches. -.Volatile user sessions +.Offline user sessions +As an OpenID Connect Provider, the server is capable of authenticating users and issuing offline tokens. When issuing an offline token after successful authentication, the server creates an offline user session and offline client session. + +The following caches are used to store offline sessions: + +* offlineSessions +* offlineClientSessions -By default, user sessions are stored in the database and loaded on-demand to the cache. -It is possible to configure {project_name} to store user sessions in the cache only and minimize the database utilization. +Like the user and client sessions caches, the offline user and client session caches are limited to 10000 entries per node by default. Items which are evicted from the memory will be loaded on-demand from the database when needed. + +.Password brute force detection +The `loginFailures` distributed cache is used to track data about failed login attempts. +This cache is needed for the Brute Force Protection feature to work in a multi-node {project_name} setup. + +.Action tokens +Action tokens are used for scenarios when a user needs to confirm an action asynchronously, for example in the emails sent by the forgot password flow. +The `actionTokens` distributed cache is used to track metadata about action tokens. + +=== Volatile user sessions + +By default, regular user sessions are stored in the database and loaded on-demand to the cache. +It is possible to configure {project_name} to store regular user sessions in the cache only and minimize calls to the database. Since all the sessions in this setup are stored in-memory, there are two side effects related to this: -* Losing sessions on when all {project_name} nodes restart. +* Losing sessions when all {project_name} nodes restart. * Increased memory consumption. +When using volatile user sessions, the cache is the source of truth for user and client sessions. +{project_name} automatically adjusts the number of entries that can be stored in memory, and increases the number of copies to prevent data loss. + Follow these steps to enable this setup: -1. Since the cache is the only source of truth for user and client sessions, configure caches to not limit the number of entries and to replicate each entry to at least two nodes. 
To do so, edit {project_name}'s cache config file (`conf/cache-ispn.xml`) for caches `sessions` and `clientSessions` with the following update: -+ --- -* Remove the `++` -* Change `owners` attribute of the `distributed-cache` tag to 2 or more --- -+ -An example of the resulting configuration for the `sessions` cache would look as follows. -+ -[source,xml] ----- - - - ----- -2. Disable `persistent-user-sessions` feature using the following command: +1. Disable `persistent-user-sessions` feature using the following command: + ---- bin/kc.sh start --features-disabled=persistent-user-sessions ... @@ -148,27 +153,8 @@ bin/kc.sh start --features-disabled=persistent-user-sessions ... Disabling `persistent-user-sessions` is not possible when `multi-site` feature is enabled. ==== -.Offline user sessions -As an OpenID Connect Provider, the server is also capable of authenticating users and issuing offline tokens. Similarly to regular user and client sessions, -when an offline token is issued by the server upon successful authentication, the server also creates an offline user session and an offline client session. - -The following caches are used to store offline sessions: - -* offlineSessions -* offlineClientSessions - -Similarly to regular user and client sessions caches, also the offline user and client session caches are limited to 10000 entries per node by default. Items which are evicted from the memory will be loaded on-demand from the database when needed. -Consider trade-off between memory consumption and the database utilization and set different sizes for the caches, edit {project_name}'s cache config file (`conf/cache-ispn.xml`) to set a `++` for those caches. - -.Password brute force detection -The `loginFailures` distributed cache is used to track data about failed login attempts. -This cache is needed for the Brute Force Protection feature to work in a multi-node {project_name} setup. - -.Action tokens -Action tokens are used for scenarios when a user needs to confirm an action asynchronously, for example in the emails sent by the forgot password flow. -The `actionTokens` distributed cache is used to track metadata about action tokens. - === Configuring cache maximum size + In order to reduce memory usage, it's possible to place an upper bound on the number of entries which are stored in a given cache. To specify an upper bound of on a cache, you must provide the following command line argument `--cache-embedded-$\{CACHE_NAME}-max-count=`, with `$\{CACHE_NAME}` replaced with the name of the cache you would like to @@ -176,16 +162,20 @@ apply the upper bound to. For example, to apply an upper-bound of `1000` to the `--cache-embedded-offline-sessions-max-count=1000`. An upper bound can not be defined on the following caches: `actionToken`, `authenticationSessions`, `loginFailures`, `work`. +Setting a maximum cache size for `sessions`, `clientSessions`, `offlineSessions` and `offlineClientSessions` is not supported when volatile sessions are enabled. + === Configuring caches for availability Distributed caches replicate cache entries on a subset of nodes in a cluster and assigns entries to fixed owner nodes. Each distributed cache, that is a primary source of truth of the data (`authenticationSessions`, `loginFailures` and `actionTokens`) has two owners per default, which means that two nodes have a copy of the specific cache entries. Non-owner nodes query the owners of a specific cache to obtain data. 
+When one of the owners becomes unavailable, the data is restored from the remaining owner and rebalanced across the remaining nodes. When both owner nodes are offline, all data is lost.
-The default number of owners is enough to survive 1 node (owner) failure in a cluster setup with at least three nodes. You are free
-to change the number of owners accordingly to better fit into your availability requirements. To change the number of owners, open `conf/cache-ispn.xml` and change the value for `owners=` for the distributed caches to your desired value.
+The default number of two owners is the minimum necessary to survive one node (owner) failure or a rolling restart in a cluster setup with at least two nodes.
+A higher number increases the availability of the data, but at the expense of slower writes as more nodes need to be updated.
+Therefore, changing the number of owners for the caches `authenticationSessions`, `loginFailures` and `actionTokens` is not recommended.
 
 === Specify your own cache configuration file
 
@@ -212,7 +202,8 @@ The CLI options `cache-remote-username` and `cache-remote-password` are optional
 If the {jdgserver_name} server has authentication enabled, {project_name} will fail to start.
 
 == Transport stacks
-Transport stacks ensure that distributed cache nodes in a cluster communicate in a reliable fashion.
+
+Transport stacks ensure that {project_name} nodes in a cluster communicate in a reliable fashion.
 {project_name} supports a wide range of transport stacks:
 
 <@opts.expectedValues option="cache-stack"/>
@@ -241,7 +232,7 @@ The following table shows transport stacks that are available using the `--cache
 
 |===
 |Stack name|Transport protocol|Discovery
-|`kubernetes`|TCP|DNS resolution using the JGroups `DNS_PING` protocol. It requires to set `jgroups.dns.query` to the headless service FQDN.
+|`kubernetes` (deprecated) |TCP|DNS resolution using the JGroups `DNS_PING` protocol. It requires setting `jgroups.dns.query` to the headless service FQDN.
 |`tcp` (deprecated)|TCP|IP multicast using the JGroups `MPING` protocol. See below on how to configure a unique `jgroups.mcast_addr` or `jgroups.mcast_port` for each cluster.
 |`udp` (deprecated)|UDP|IP multicast using the JGroups `PING` protocol. See below on how to configure a unique `jgroups.mcast_addr` or `jgroups.mcast_port` for each cluster.
 |===
@@ -251,62 +242,72 @@ By default, {project_name} uses `239.6.7.8` as multicast address for `jgroups.mc
 NOTE: Use `-D<property>=<value>` to pass the properties via the `JAVA_OPTS_APPEND` environment variable or in the CLI command.
 
+// Keeping this paragraph in 26.x as stacks like `ec2` were mentioned in the 26.0 docs.
+// Should be removed for 27.0 when `jdbc-ping` is the general purpose default for everyone.
 ====
 *Additional Stacks*
 It is recommended to use one of the stacks available above.
 Additional stacks are provided by Infinispan, but it is outside the scope of this guide how to configure them.
-Please refer to {infinispan_embedding_docs}#cluster-transport[Setting up Infinispan cluster transport] for further documentation.
+Please refer to {infinispan_embedding_docs}#cluster-transport[Setting up Infinispan cluster transport] and {infinispan_embedding_docs}#customizing-jgroups-stacks_cluster-transport[Customizing JGroups stacks] for further documentation.
 ====
 
-=== Custom transport stacks
-If none of the available transport stacks are enough for your deployment, you are able to change your cache configuration file
-and define your own transport stack.
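+
+For example, when using one of the deprecated multicast stacks on an isolated network, the multicast address and port can be overridden by passing the JGroups properties as system properties. The following is a sketch with placeholder values:
+
+[source,bash]
+----
+export JAVA_OPTS_APPEND="-Djgroups.mcast_addr=239.6.7.10 -Djgroups.mcast_port=46655"
+bin/kc.sh start --cache-stack=udp ...
+----
+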
+== Securing transport stacks -For more details, see {infinispan_embedding_docs}#customizing-jgroups-stacks_cluster-transport[Customizing JGroups stacks]. +Encryption using TLS is enabled by default for TCP-based transport stacks, which is also the default configuration +No additional CLI options or modifications of the cache XML are required as long as you are using a TCP-based transport stack. -.defining a custom transport stack -[source] +[NOTE] +==== +If you are using a transport stack based on `UDP` or `TCP_NIO2`, proceed as follows to configure the encryption of the transport stack: + +. Set the option `cache-embedded-mtls-enabled` to `false`. +. Follow the documentation in http://jgroups.org/manual5/index.html#ENCRYPT[JGroups Encryption documentation] and {infinispan_embedding_docs}#secure-cluster-transport[Encrypting cluster transport]. +==== + +With TLS enabled, {project_name} auto-generates a self-signed RSA 2048 bit certificate to secure the connection and uses TLS 1.3 to secure the communication. +The keys and the certificate are stored in the database so they are available to all nodes. +By default, the certificate is valid for 60 days and is rotated at runtime every 30 days. +Use the option `cache-embedded-mtls-rotation-interval-days` to change this. + +=== Running inside a service mesh + +When using a service mesh like Istio, you might need to allow a direct mTLS communication between the {project_name} Pods to allow for the mutual authentication to work. +Otherwise, you might see error messages like `JGRP000006: failed accepting connection from peer SSLSocket` that indicate that a wrong certificate was presented, and the cluster will not form correctly. + +You then have the option to allow direct mTLS communication between the {project_name} Pods, or rely on the service mesh transport security to encrypt the communication and to authenticate peers. + +To allow direct mTLS communication for {project_name} when using Istio: + +* Apply the following configuration to allow direct communication. ++ +[source,yaml] ---- - - - - - - - - - - ... - +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: infinispan-allow-nomtls +spec: + selector: + matchLabels: + app: keycloak # <1> + portLevelMtls: + "7800": # <2> + mode: PERMISSIVE ---- +<1> Update the labels to match your {project_name} deployment. +<2> Port 7800 is the default. Adjust it if you change the data transmission port. -By default, the value set to the `cache-stack` option has precedence over the transport stack you define in the cache configuration file. -If you are defining a custom stack, make sure the `cache-stack` option is not used for the custom changes to take effect. +As an alternative, to disable the mTLS communication, and rely on the service mesh to encrypt the traffic: -== Securing cache communication -The current Infinispan cache implementation should be secured by various security measures such as RBAC, ACLs, and transport stack encryption. +* Set the option `cache-embedded-mtls-enabled` to `false`. +* Configure your service mesh to authorize only traffic from other {project_name} Pods for the data transmission port (default: 7800). -JGroups handles all the communication between {project_name} server, and it supports Java SSL sockets for TCP communication. -{project_name} uses CLI options to configure the TLS communication without having to create a customized JGroups stack or modifying the cache XML file. 
+=== Providing your own keys and certificates -To enable TLS, `cache-embedded-mtls-enabled` must be set to `true`. -It requires a keystore with the certificate to use: `cache-embedded-mtls-key-store-file` sets the path to the keystore, and `cache-embedded-mtls-key-store-password` sets the password to decrypt it. +Although not recommended for standard setups, if it is essential in a specific setup, you can configure the keystore with the certificate for the transport stack manually. `cache-embedded-mtls-key-store-file` sets the path to the keystore, and `cache-embedded-mtls-key-store-password` sets the password to decrypt it. The truststore contains the valid certificates to accept connection from, and it can be configured with `cache-embedded-mtls-trust-store-file` (path to the truststore), and `cache-embedded-mtls-trust-store-password` (password to decrypt it). -To restrict unauthorized access, use a self-signed certificate for each {project_name} deployment. - -For JGroups stacks with `UDP` or `TCP_NIO2`, see the http://jgroups.org/manual5/index.html#ENCRYPT[JGroups Encryption documentation] on how to set up the protocol stack. - -For more information about securing cache communication, see the {infinispan_embedding_docs}#secure-cluster-transport[Encrypting cluster transport] documentation. +To restrict unauthorized access, always use a self-signed certificate for each {project_name} deployment. == Network Ports @@ -314,21 +315,97 @@ To ensure a healthy {project_name} clustering, some network ports need to be ope The table below shows the TCP ports that need to be open for the `jdbc-ping` stack, and a description of the traffic that goes through it. |=== -|Port |Property | Description +|Port |Option| Property | Description m|7800 -m|jgroups.bind.address +m|cache-embedded-network-bind-port +m|jgroups.bind.port |Unicast data transmission. m|57800 +m| m|jgroups.fd.port-offset |Failure detection by protocol `FD_SOCK2`. It listens to the abrupt closing of a socket to suspect a {project_name} server failure. -The `jgroups.fd.port-offset` property defines the offset from the `jgroups.bind.address`. +The `jgroups.fd.port-offset` property defines the offset from the `cache-embedded-network-bind-port` option or `jgroups.bind.port` property. +By default, the offset is set to 50000, making the failure detection port 57800. + +|=== + +NOTE: If an option is not available for the port you require, configure it using a system property `-D=` +in your `JAVA_OPTS_APPEND` environment variable or in your CLI command. + +[#network-bind-address] +== Network bind address + +To ensure a healthy {project_name} clustering, the network port must be bound on an interface that is accessible from all other nodes of the cluster. + +By default, it picks a site-local (non-routable) IP address, for example, from the 192.168.0.0/16 or 10.0.0.0/8 address range. + +To override the address, set the option `cache-embedded-network-bind-address=`. + +The following special values are also recognized: + +|=== +|Value |Description + +m|GLOBAL +|Picks a global IP address if available. +If not available, it falls back to `SITE_LOCAL`. + +m|SITE_LOCAL +|Picks a site-local (non-routable) IP address (for example, from the 192.168.0.0 or 10.0.0.0 address ranges). +This is the default value. + +m|LINK_LOCAL +|Picks a link-local IP address from 169.254.1.0 through 169.254.254.255. + +m|NON_LOOPBACK +|Picks any non-loopback address. + +m|LOOPBACK +|Picks a loopback address (for example, 127.0.0.1).
+ +m|match-interface: +|Picks an address that matches a pattern against the interface name. +For example, `match-interface:tun0` or `match-interface:eth.\*`. + +m|match-address: +|Picks an address that matches a pattern against the host address. +For example, `match-address:192.168.\*`. + +m|match-host: +|Picks an address that matches a pattern against the host name. +For example, `match-host:linux.\*`. + +|=== + +To set up for IPv6 only and have {project_name} pick the bind address automatically, use the following settings: + +[source,bash] +---- +export JAVA_OPTS_APPEND="-Djava.net.preferIPv4Stack=false -Djava.net.preferIPv6Addresses=true" +---- + +For more details about JGroups transport, check the http://jgroups.org/manual5/index.html#Transport[JGroups documentation page] or the {infinispan_embedding_docs}#cluster-transport[Infinispan documentation page]. + +== Running instances on different networks + +If you run {project_name} instances on different networks, for example behind firewalls or in containers, the different instances will not be able to reach each other by their local IP address. +In such a case, set up a port forwarding rule (sometimes called "`virtual server`") to their local IP address. + +When using port forwarding, use the following options so each node correctly advertises its external address to the other nodes: |=== +|Option | Description + +m|cache-embedded-network-external-port +|Port that other instances in the {project_name} cluster should use to contact this node. -NOTE: Use `-D=. +<@opts.printRelevantOptions includedOptions="cache cache-*" excludedOptions="cache-embedded-* cache-remote-*"> + +=== Embedded Cache +<@opts.includeOptions includedOptions="cache-embedded-*"/> + +=== Remote Cache +<@opts.includeOptions includedOptions="cache-remote-*"/> + + + diff --git a/docs/guides/server/configuration-production.adoc b/docs/guides/server/configuration-production.adoc index d2d25aaaa884..4993fad03a1e 100644 --- a/docs/guides/server/configuration-production.adoc +++ b/docs/guides/server/configuration-production.adoc @@ -4,7 +4,7 @@ <@tmpl.guide title="Configuring {project_name} for production" -summary="Learn how to make {project_name} ready for production." +summary="Prepare {project_name} for use in production." includedOptions=""> A {project_name} production environment provides secure authentication and authorization for deployments that range from on-premise deployments that support a few thousand users to deployments that serve millions of users. @@ -83,4 +83,13 @@ For example, to change the IP stack preference to IPv4, set an environment varia export JAVA_OPTS_APPEND="-Djava.net.preferIPv4Stack=true" ---- +To set up the server for IPv6 only, set an environment variable as follows for the distributed caches to form a cluster: + +[source, bash] +---- +export JAVA_OPTS_APPEND="-Djava.net.preferIPv4Stack=false -Djava.net.preferIPv6Addresses=true" +---- + +See <@links.server id="caching" anchor="network-bind-address"/> for more details. 
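+
+Tying these clustering options together, the following sketch shows how a node behind a port-forwarding rule might combine the bind and external port options described in <@links.server id="caching" anchor="network-bind-address"/>. The address pattern and the forwarded port are placeholders for this example.
+
+[source,bash]
+----
+# Example only: bind the cluster transport to a site-local interface
+# and advertise the forwarded external port to the other nodes.
+bin/kc.sh start \
+  --cache-embedded-network-bind-address='match-address:192.168.*' \
+  --cache-embedded-network-bind-port=7800 \
+  --cache-embedded-network-external-port=17800
+----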
+ diff --git a/docs/guides/server/configuration-provider.adoc b/docs/guides/server/configuration-provider.adoc index 38cb1bf13d25..211c51730934 100644 --- a/docs/guides/server/configuration-provider.adoc +++ b/docs/guides/server/configuration-provider.adoc @@ -5,7 +5,7 @@ <@tmpl.guide title="Configuring providers" -summary="Understand how to configure providers"> +summary="Configure providers for {project_name}."> The server is built with extensibility in mind and for that it provides a number of Service Provider Interfaces or SPIs, each one responsible for providing a specific capability to the server. In this {section}, you are going to understand the core concepts around @@ -19,6 +19,12 @@ any provider, including those you have implemented to extend the server capabili Providers can be configured by using a specific configuration format. The format consists of: [source] +---- +spi-----= +---- + +Or if there is no possibility of ambiguity between multiple providers: + ---- spi---= ---- @@ -27,7 +33,9 @@ The `` is the name of the SPI you want to configure. The `` is the id of the provider you want to configure. This is the id set to the corresponding provider factory implementation. -The `` is the actual name of the property you want to set for a given provider. +The `` is the actual name of the property you want to set for a given provider + +NOTE: the property name `enabled` is effectively reserved for enabling / disabling a provider All those names (for spi, provider, and property) should be in lower case and if the name is in camel-case such as `myKeycloakProvider`, it should include dashes (`-`) before upper-case letters as follows: `my-keycloak-provider`. @@ -35,31 +43,47 @@ Taking the `HttpClientSpi` SPI as an example, the name of the SPI is `connection [source] ---- -spi-connections-http-client-default-connection-pool-size=10 +spi-connections-http-client--default--connection-pool-size=10 ---- -== Setting a provider configuration option +=== Setting a provider configuration option Provider configuration options are provided when starting the server. See all support configuration sources and formats for options in <@links.server id="configuration"/>. For example via a command line option: .Setting the `connection-pool-size` for the `default` provider of the `connections-http-client` SPI -<@kc.start parameters="--spi-connections-http-client-default-connection-pool-size=10"/> +<@kc.start parameters="--spi-connections-http-client--default--connection-pool-size=10"/> + +== Build time options -== Configuring a single provider for an SPI +=== Configuring a single provider for an SPI Depending on the SPI, multiple provider implementations can co-exist but only one of them is going to be used at runtime. -For these SPIs, a specific provider is the primary implementation that is going to be active and used at runtime. +For these SPIs, a specific provider is the primary implementation that is going to be active and used at runtime. The format consists of: + +[source] +---- +spi---provider= +---- + +NOTE: `spi--provider=` may still be used, but the server will not properly detect when reaugmentation is needed. 
To configure a provider as the single provider you should run the `build` command as follows: .Marking the `mycustomprovider` provider as the single provider for the `email-template` SPI -<@kc.build parameters="--spi-email-template-provider=mycustomprovider"/> +<@kc.build parameters="--spi-email-template--provider=mycustomprovider"/> -== Configuring a default provider for an SPI +=== Configuring a default provider for an SPI Depending on the SPI, multiple provider implementations can co-exist and one is used by default. For these SPIs, a specific provider is the default implementation that is going to selected unless a specific provider -is requested. +is requested. The format consists of: + +[source] +---- +spi---provider-default= +---- + +NOTE: `spi--provider-default=` may still be used, but the server will not properly detect when reaugmentation is needed. The following logic is used to determine the default provider: @@ -70,15 +94,23 @@ The following logic is used to determine the default provider: To configure a provider as the default provider you should run the `build` command as follows: .Marking the `mycustomhash` provider as the default provider for the `password-hashing` SPI -<@kc.build parameters="--spi-password-hashing-provider-default=mycustomprovider"/> +<@kc.build parameters="--spi-password-hashing--provider-default=mycustomprovider"/> +=== Enabling and disabling a provider + +The format consists of: + +[source] +---- +spi-----enabled= +---- -== Enabling and disabling a provider +NOTE: `spi---enabled=` may still be used, but the server will not properly detect when reaugmentation is needed. To enable or disable a provider you should run the `build` command as follows: .Enabling a provider -<@kc.build parameters="--spi-email-template-mycustomprovider-enabled=true"/> +<@kc.build parameters="--spi-email-template--mycustomprovider--enabled=true"/> To disable a provider, use the same command and set the `enabled` property to `false`. diff --git a/docs/guides/server/configuration.adoc b/docs/guides/server/configuration.adoc index f424fdbabf85..150d16134336 100644 --- a/docs/guides/server/configuration.adoc +++ b/docs/guides/server/configuration.adoc @@ -4,7 +4,7 @@ <@tmpl.guide title="Configuring {project_name}" -summary="Understand how to configure and start {project_name}"> +summary="Configure and start {project_name}."> This {section} explains the configuration methods for {project_name} and how to start and apply the preferred configuration. It includes configuration guidelines for optimizing {project_name} for faster startup and low memory footprint. @@ -85,7 +85,7 @@ db-url-host=mykeycloakdb Alternatively, see <@links.server id="all-config"/> for all server options. -=== Formats for environment variables +=== Format for referencing environment variables You can use placeholders to resolve an environment specific value from environment variables inside the `keycloak.conf` file by using the `${r"++${ENV_VAR}++"}` syntax: [source] @@ -153,8 +153,33 @@ Note that some Quarkus properties are already mapped in the {project_name} confi To disable expression evaluation, the `\` character functions as an escape character. In particular, it must be used to escape the usage of `$` characters when they appear to define an expression or are repeated. For example, if you want the configuration value `my$$password`, use `my\$\$password` instead. Note that the `\` character requires additional escaping or quoting when using most unix shells, or when it appears in properties files. 
For example, bash single quotes preserve the single backslash `--db-password='my\$\$password'`. Also, with bash double quotes, you need an additional backslash `--db-password="my\\$\\$password"`. Similarly in a properties file, backslash characters must also be escaped: `kc.db-password=my\\$\\$password` +.Windows-specific considerations When specifying Windows file paths in configuration values, backslashes must also be escaped. For example, if you want to specify the path `C:\path\to\file`, you should write it as `C:\\path\\to\\file`. Alternatively, you can use forward slashes which don't need escaping: `C:/path/to/file`. +When using PowerShell and your values contain special characters like commas, use single quotes around double quotes: + +[source, bash] +---- +.\kc.bat start --log-level='"INFO,org.hibernate:debug"' +---- + +PowerShell handles quotes differently. It interprets the quoted string before passing it to the `kc.bat` script, removing the outer quote characters. Therefore, an extra layer of quotes is needed to preserve the value structure. Otherwise, the comma would be interpreted as a delimiter. In Windows CMD, you can use double quotes directly. + +=== Formats for environment variable keys with special characters + +Non-alphanumeric characters in your configuration key must be converted to `_` in the corresponding environment variable key. + +Environment variables are converted back to normal option keys by lower-casing the name and replacing `\_` with `-`. Logging wildcards are the exception as `_` in the category is replaced with `.` instead. Logging categories are commonly class / package names, which are more likely to use `.` rather than `-`. + +WARNING: Automatic mapping of the environment variable key to option key may not preserve the intended key + +For example `kc.log-level-package.class_name` will become the environment variable key `KC_LOG_LEVEL_PACKAGE_CLASS_NAME`. That will automatically be mapped to `kc.log-level-package.class.name` because `_` in the logging category will be replaced by `.`. Unfortunately this does not match the intent of `kc.log-level-package.class_name`. + +You have a couple of options in this case: + +- create an entry in your `keycloak.conf` file that references an environment variable of your choosing. e.g. `kc.log-level-package.class_name=${r"${CLASS_NAME_LEVEL}"}`. See more on referencing environment variables in <>. +- or in situations where modifying the `keycloak.conf` may not be easy, you can use a pair of environment variables `KC_UNIQUEIFIER=value` and `KCKEY_UNIQUEIFIER=key`, e.g. `KC_MYKEY=debug` and `KCKEY_MYKEY=log-level-package.class_name`, or `KC_LOG_LEVEL_PACKAGE_CLASS_NAME=debug` and `KCKEY_LOG_LEVEL_PACKAGE_CLASS_NAME=log-level-package.class_name` + == Starting {project_name} You can start {project_name} in `development mode` or `production mode`. Each mode offers different defaults for the intended environment. @@ -271,12 +296,12 @@ You can achieve most optimizations to startup and runtime behavior by using the Some of the realm capabilities allow administrators to reference system variables such as environment variables and system properties when configuring the realm and its components. -By default, {project_name} disallow using system variables but only those explicitly specified through the `spi-admin-allowed-system-variables` configuration +By default, {project_name} disallow using system variables but only those explicitly specified through the `+spi-admin--allowed-system-variables+` configuration option. 
This option allows you to specify a comma-separated list of keys that will eventually resolve to values from system variables with the same key. . Start the server and expose a set of system variables to the server runtime + - <@kc.start parameters="--spi-admin-allowed-system-variables=FOO,BAR"/> + <@kc.start parameters="--spi-admin--allowed-system-variables=FOO,BAR"/> In future releases, this capability will be removed in favor of preventing any usage of system variables in the realm configuration. diff --git a/docs/guides/server/containers.adoc b/docs/guides/server/containers.adoc index 50db3b86c392..f3e168cf8b88 100644 --- a/docs/guides/server/containers.adoc +++ b/docs/guides/server/containers.adoc @@ -6,7 +6,7 @@ <@tmpl.guide title="Running {project_name} in a container" -summary="Learn how to run {project_name} from a container image" +summary="Run {project_name} from a container image." includedOptions="db db-url db-username db-password features hostname https-key-store-file https-key-store-password health-enabled metrics-enabled"> This {section} describes how to optimize and run the {project_name} container image to provide the best experience running a container. @@ -120,6 +120,28 @@ This approach uses a chroot, `+/mnt/rootfs+`, so that only the packages you spec WARNING: Some packages have a large tree of dependencies. By installing new RPMs you may unintentionally increase the container's attack surface. Check the list of installed packages carefully. +=== Custom ENTRYPOINT shell scripts + +If you use a custom entry point script, start {project_name} with `exec` so it can receive termination signals that are essential for a graceful shutdown. + +.Correct approach for an ENTRYPOINT shell script +[source,bash] +---- +#!/bin/bash + +# (add your custom logic here) + +# Run the 'exec' command as the last step of the script. +# As it replaces the current shell process, no additional shell commands will run after the 'exec' command. +exec /opt/keycloak/bin/kc.sh start "$@" +---- + +[WARNING] +==== +Without `exec`, the shell script remains PID 1 in the container and blocks signals like `SIGTERM` from reaching {project_name}. +This prevents a graceful shutdown and can lead to cache inconsistencies or data loss. +==== + === Building the container image To build the actual container image, run the following command from the directory containing your Containerfile: @@ -128,6 +150,13 @@ To build the actual container image, run the following command from the director podman|docker build . -t mykeycloak ---- +<@profile.ifProduct> +[NOTE] +==== +Podman can be used only for creating or customizing images. Podman is not supported for running {project_name} in production environments. +==== + + === Starting the optimized {project_name} container image To start the image, run: @@ -145,6 +174,33 @@ Health check endpoints are available at `https://localhost:9000/health`, `https: Opening up `https://localhost:9000/metrics` leads to a page containing operational metrics that could be used by your monitoring solution. +=== Known issues with Docker + +* If a `RUN dnf install` command seems to be taking an excessive amount of time, then likely your Docker systemd service has the file limit setting `LimitNOFILE` configured incorrectly. +Either update the service configuration to use a better value, such as 1024000, or directly use `ulimit` in the RUN command: + +[source, dockerfile] +---- +... +RUN ulimit -n 1024000 && dnf install --installroot ... +... 
+---- + +* If you are including provider JARs and your container fails a `start --optimized` with a notification that a provider JAR has changed, this is due to Docker truncating +or otherwise modifying file modification timestamps from what the `build` command recorded to what is seen at runtime. + In this case you will need to force the image to use a known timestamp of your choosing with a `touch` command prior to running a `build`: + +[source, dockerfile] +---- +... +# ADD or copy one or more provider jars +ADD --chown=keycloak:keycloak --chmod=644 some-jar.jar /opt/keycloak/providers/ +... +RUN touch -m --date=@1743465600 /opt/keycloak/providers/* +RUN /opt/keycloak/bin/kc.sh build +... +---- + == Exposing the container to a different port By default, the server is listening for `http` and `https` requests using the ports `8080` and `8443`, respectively. @@ -168,7 +224,7 @@ You use the `start-dev` command: [source,bash,subs="attributes+"] ---- -podman|docker run --name mykeycloak -p 8080:8080 \ +podman|docker run --name mykeycloak -p 127.0.0.1:8080:8080 \ -e KC_BOOTSTRAP_ADMIN_USERNAME=admin -e KC_BOOTSTRAP_ADMIN_PASSWORD=change_me \ quay.io/keycloak/keycloak:{containerlabel} \ start-dev @@ -187,10 +243,11 @@ For example: [source,bash,subs="attributes+"] ---- -podman|docker run --name mykeycloak -p 8080:8080 \ +podman|docker run --name mykeycloak -p 127.0.0.1:8080:8080 \ -e KC_BOOTSTRAP_ADMIN_USERNAME=admin -e KC_BOOTSTRAP_ADMIN_PASSWORD=change_me \ quay.io/keycloak/keycloak:{containerlabel} \ start \ + --hostname=localhost --http-enabled=true --db=postgres --features=token-exchange \ --db-url= --db-username= --db-password= \ --https-key-store-file= --https-key-store-password= @@ -220,7 +277,7 @@ The {project_name} containers have a directory `/opt/keycloak/data/import`. If y [source,bash,subs="attributes+"] ---- -podman|docker run --name keycloak_unoptimized -p 8080:8080 \ +podman|docker run --name keycloak_unoptimized -p 127.0.0.1:8080:8080 \ -e KC_BOOTSTRAP_ADMIN_USERNAME=admin -e KC_BOOTSTRAP_ADMIN_PASSWORD=change_me \ -v /path/to/realm/data:/opt/keycloak/data/import \ quay.io/keycloak/keycloak:{containerlabel} \ @@ -249,7 +306,7 @@ For example, you can specify the environment variable and memory limit as follow [source,bash,subs="attributes+"] ---- -podman|docker run --name mykeycloak -p 8080:8080 -m 1g \ +podman|docker run --name mykeycloak -p 127.0.0.1:8080:8080 -m 1g \ -e KC_BOOTSTRAP_ADMIN_USERNAME=admin -e KC_BOOTSTRAP_ADMIN_PASSWORD=change_me \ -e JAVA_OPTS_KC_HEAP="-XX:MaxHeapFreeRatio=30 -XX:MaxRAMPercentage=65" \ quay.io/keycloak/keycloak:{containerlabel} \ diff --git a/docs/guides/server/db.adoc b/docs/guides/server/db.adoc index 51faa046888b..98cd28d85a53 100644 --- a/docs/guides/server/db.adoc +++ b/docs/guides/server/db.adoc @@ -6,7 +6,7 @@ <@tmpl.guide title="Configuring the database" - summary="An overview about how to configure relational databases" + summary="Configure a relational database for {project_name} to store user, client, and realm data." includedOptions="db db-* transaction-xa-enabled"> This {section} explains how to configure the {project_name} server to store data in a relational database. @@ -53,15 +53,15 @@ or skip this section if you want to connect to a different database for which th To install the Oracle Database driver for {project_name}: -. Download the `ojdbc11` and `orai18n` JAR files from one of the following sources: +. Download the `ojdbc17` and `orai18n` JAR files from one of the following sources: .. 
*Zipped JDBC driver and Companion Jars* version ${properties["oracle-jdbc.version"]} from the https://www.oracle.com/database/technologies/appdev/jdbc-downloads.html[Oracle driver download page]. -.. Maven Central via `link:++https://repo1.maven.org/maven2/com/oracle/database/jdbc/ojdbc11/${properties["oracle-jdbc.version"]}/ojdbc11-${properties["oracle-jdbc.version"]}.jar++[ojdbc11]` and `link:++https://repo1.maven.org/maven2/com/oracle/database/nls/orai18n/${properties["oracle-jdbc.version"]}/orai18n-${properties["oracle-jdbc.version"]}.jar++[orai18n]`. +.. Maven Central via `link:++https://repo1.maven.org/maven2/com/oracle/database/jdbc/ojdbc17/${properties["oracle-jdbc.version"]}/ojdbc17-${properties["oracle-jdbc.version"]}.jar++[ojdbc17]` and `link:++https://repo1.maven.org/maven2/com/oracle/database/nls/orai18n/${properties["oracle-jdbc.version"]}/orai18n-${properties["oracle-jdbc.version"]}.jar++[orai18n]`. .. Installation media recommended by the database vendor for the specific database in use. -. When running the unzipped distribution: Place the `ojdbc11` and `orai18n` JAR files in {project_name}'s `providers` folder +. When running the unzipped distribution: Place the `ojdbc17` and `orai18n` JAR files in {project_name}'s `providers` folder . When running containers: Build a custom {project_name} image and add the JARs in the `providers` folder. When building a custom image for the Operator, those images need to be optimized images with all build-time options of {project_name} set. + @@ -70,7 +70,7 @@ A minimal Containerfile to build an image which can be used with the {project_na [source,dockerfile,subs="attributes+"] ---- FROM quay.io/keycloak/keycloak:{containerlabel} -ADD --chown=keycloak:keycloak --chmod=644 https://repo1.maven.org/maven2/com/oracle/database/jdbc/ojdbc11/${properties["oracle-jdbc.version"]}/ojdbc11-${properties["oracle-jdbc.version"]}.jar /opt/keycloak/providers/ojdbc11.jar +ADD --chown=keycloak:keycloak --chmod=644 https://repo1.maven.org/maven2/com/oracle/database/jdbc/ojdbc17/${properties["oracle-jdbc.version"]}/ojdbc17-${properties["oracle-jdbc.version"]}.jar /opt/keycloak/providers/ojdbc17.jar ADD --chown=keycloak:keycloak --chmod=644 https://repo1.maven.org/maven2/com/oracle/database/nls/orai18n/${properties["oracle-jdbc.version"]}/orai18n-${properties["oracle-jdbc.version"]}.jar /opt/keycloak/providers/orai18n.jar # Setting the build parameter for the database: ENV KC_DB=oracle @@ -271,6 +271,18 @@ show server_encoding; create database keycloak with encoding 'UTF8'; ---- +== Preparing for PostgreSQL + +When running PostgreSQL reader and writer instances, {project_name} needs to always connect to the writer instance to do its work. +When using the original PostgreSQL driver, {project_name} sets the `targetServerType` property of the PostgreSQL JDBC driver to `primary` to ensure that it always connects to a writable primary instance and never connects to a secondary reader instance in failover or switchover scenarios. + +You can override this behavior by setting your own value for `targetServerType` in the DB URL or additional properties. + +[NOTE] +==== +The `targetServerType` is only applied automatically to the primary datasource, as requirements might be different for additional datasources. 
+==== + [[preparing-keycloak-for-amazon-aurora-postgresql]] == Preparing for Amazon Aurora PostgreSQL @@ -296,6 +308,8 @@ See the <@links.server id="containers" /> {section} for details on how to build `db-url`:: Insert `aws-wrapper` to the regular PostgreSQL JDBC URL resulting in a URL like `+jdbc:aws-wrapper:postgresql://...+`. `db-driver`:: Set to `software.amazon.jdbc.Driver` to use the AWS JDBC wrapper. +NOTE: When overriding the `wrapperPlugins` option of the AWS JDBC Driver, always include the `failover` or `failover2` plugin to ensure that {project_name} always connects to the writer instance even in failover or switchover scenarios. + == Preparing for MySQL server Beginning with MySQL 8.0.30, MySQL supports generated invisible primary keys for any InnoDB table that is created without an explicit primary key (more information https://dev.mysql.com/doc/refman/8.0/en/create-table-gipks.html[here]). @@ -308,7 +322,7 @@ Because cluster nodes can boot concurrently, they take extra time for database a The maximum timeout for this lock is 900 seconds. If a node waits on this lock for more than the timeout, the boot fails. The need to change the default value is unlikely, but you can change it by entering this command: -<@kc.start parameters="--spi-dblock-jpa-lock-wait-timeout 900"/> +<@kc.start parameters="--spi-dblock--jpa--lock-wait-timeout 900"/> == Using Database Vendors with XA transaction support {project_name} uses non-XA transactions and the appropriate database drivers by default. @@ -330,16 +344,18 @@ NOTE: Enabling XA transactions in a containerized environment does not fully sup To setup the JPA migrationStrategy (manual/update/validate) you should setup JPA provider as follows: .Setting the `migration-strategy` for the `quarkus` provider of the `connections-jpa` SPI -<@kc.start parameters="--spi-connections-jpa-quarkus-migration-strategy=manual"/> +<@kc.start parameters="--spi-connections--jpa--quarkus-migration-strategy=manual"/> If you want to get a SQL file for DB initialization, too, you have to add this additional SPI initializeEmpty (true/false): .Setting the `initialize-empty` for the `quarkus` provider of the `connections-jpa` SPI -<@kc.start parameters="--spi-connections-jpa-quarkus-initialize-empty=false"/> +<@kc.start parameters="--spi-connections--jpa--quarkus-initialize-empty=false"/> In the same way the migrationExport to point to a specific file and location: .Setting the `migration-export` for the `quarkus` provider of the `connections-jpa` SPI -<@kc.start parameters="--spi-connections-jpa-quarkus-migration-export=/"/> +<@kc.start parameters="--spi-connections--jpa--quarkus-migration-export=/"/> + +For more information, check the link:{upgrading_guide_link}#_migrate_db[Migrating the database] documentation. diff --git a/docs/guides/server/directory-structure.adoc b/docs/guides/server/directory-structure.adoc index 7c6c4006e040..4aa5f26ac2ef 100644 --- a/docs/guides/server/directory-structure.adoc +++ b/docs/guides/server/directory-structure.adoc @@ -4,7 +4,7 @@ <@tmpl.guide title="Directory Structure" -summary="Understand the purpose of the directories under the installation root" +summary="Understand the purpose of the directories under the installation root." 
includedOptions=""> == Installation Locations diff --git a/docs/guides/server/enabletls.adoc b/docs/guides/server/enabletls.adoc index 3286eda3b6aa..875f021733ca 100644 --- a/docs/guides/server/enabletls.adoc +++ b/docs/guides/server/enabletls.adoc @@ -1,12 +1,11 @@ <#import "/templates/guide.adoc" as tmpl> <#import "/templates/kc.adoc" as kc> +<#import "/templates/options.adoc" as opts> <#import "/templates/links.adoc" as links> <@tmpl.guide title="Configuring TLS" -summary="Learn how to configure {project_name}'s https certificates for ingoing and outgoing requests." -includedOptions="https-* http-enabled" -excludedOptions="https-trust-store-* https-client-auth https-management-client-auth"> +summary="Configure {project_name}'s https certificates for ingoing and outgoing requests."> Transport Layer Security (short: TLS) is crucial to exchange data over a secured channel. For production environments, you should never expose {project_name} endpoints through HTTP, as sensitive data is at the core of what {project_name} exchanges with other applications. @@ -64,4 +63,11 @@ To also allow TLSv1.2, use a command such as the following: `kc.sh start --https By default {project_name} will reload the certificates, keys, and keystores specified in `+https-*+` options every hour. For environments where your server keys may need frequent rotation, this allows that to happen without a server restart. You may override the default via the `https-certificates-reload-period` option. Interval on which to reload key store, trust store, and certificate files referenced by `+https-*+` options. The value may be a java.time.Duration value, an integer number of seconds, or an integer followed by one of the time units [`ms`, `h`, `m`, `s`, `d`]. Must be greater than 30 seconds. Use `-1` to disable. +<@opts.printRelevantOptions includedOptions="https-* http-enabled" excludedOptions="https-trust-store-* https-client-auth https-management-* https-management-client-auth"> + +=== Management server +<@opts.includeOptions includedOptions="https-management-*" excludedOptions="https-management-client-auth"/> + + + diff --git a/docs/guides/server/features.adoc b/docs/guides/server/features.adoc index 5f1029915dbf..94f11b0589d8 100644 --- a/docs/guides/server/features.adoc +++ b/docs/guides/server/features.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Enabling and disabling features" -summary="Understand how to configure {project_name} to use optional features" +summary="Configure {project_name} to use optional features." includedOptions="features features-*"> {project_name} has packed some functionality in features, including some disabled features, such as Technology Preview and deprecated features. Other features are enabled by default, but you can disable them if they do not apply to your use of {project_name}. @@ -24,11 +24,11 @@ To enable all preview features, enter this command: Enabled feature may be versioned, or unversioned. If you use a versioned feature name, e.g. feature:v1, that exact feature version will be enabled as long as it still exists in the runtime. If you instead use an unversioned name, e.g. just feature, the selection of the particular supported feature version may change from release to release according to the following precedence: -1. The highest default supported version -1. The highest non-default supported version -1. The highest deprecated version -1. The highest preview version -1. The highest experimental version +. The highest default supported version +. 
The highest non-default supported version +. The highest deprecated version +. The highest preview version +. The highest experimental version == Disabling features @@ -81,7 +81,7 @@ There are no deprecated features in this {project_name} release. <#macro showFeatures features> <#list features as feature> -[.features-name]#${feature.name}#:: +[.features-name]#${feature.versionedKey}#:: [.features-description]#${feature.description}# - \ No newline at end of file + diff --git a/docs/guides/server/fips.adoc b/docs/guides/server/fips.adoc index 2427c85d296d..a2504216cbd7 100644 --- a/docs/guides/server/fips.adoc +++ b/docs/guides/server/fips.adoc @@ -4,7 +4,7 @@ <@tmpl.guide title="FIPS 140-2 support" -summary="How to configure {project_name} server for FIPS compliance" +summary="Configure {project_name} server for FIPS compliance." includedOptions=""> The Federal Information Processing Standard Publication 140-2, (FIPS 140-2), is a U.S. government computer security standard used to approve cryptographic modules. {project_name} supports running in FIPS 140-2 compliant mode. In this case, {project_name} will use only FIPS approved cryptography algorithms for its functionality. @@ -31,7 +31,7 @@ fips-mode-setup --enable == BouncyCastle library {project_name} internally uses the BouncyCastle library for many cryptography utilities. Please note that the default version of the BouncyCastle library that shipped with {project_name} is not FIPS compliant; -however, BouncyCastle also provides a FIPS validated version of its library. The FIPS validated BouncyCastle library cannot be shipped with {project_name} due to license constraints and +however, BouncyCastle also provides a FIPS validated version of its library. The FIPS validated BouncyCastle library is not shipped with {project_name} as {project_name} cannot provide official support of it. Therefore, to run in FIPS compliant mode, you need to download BouncyCastle-FIPS bits and add them to the {project_name} distribution. When {project_name} executes in fips mode, it will use the BCFIPS bits instead of the default BouncyCastle bits, which achieves FIPS compliance. @@ -118,7 +118,14 @@ Using that option results in stricter security requirements on cryptography and NOTE: In strict mode, the default keystore type (as well as default truststore type) is BCFKS. If you want to use a different keystore type it is required to use the option `--https-key-store-type` with appropriate type. A similar command might be needed for the truststore as well if you want to use it. -When starting the server, you can check that the startup log contains `KC` provider with the note about `Approved Mode` such as the following: +When starting the server, you can include TRACE level in the startup command. For example: + +[source,bash,subs=+attributes] +---- +--log-level=INFO,org.keycloak.common.crypto.CryptoIntegration:TRACE +---- + +By using TRACE level, you can check that the startup log contains `KC` provider with the note about `Approved Mode` such as the following: [source] ---- @@ -138,14 +145,14 @@ So effectively, you can use an option such as the following when starting the se [source] ---- ---spi-password-hashing-pbkdf2-sha512-max-padding-length=14 +--spi-password-hashing--pbkdf2-sha512--max-padding-length=14 ---- NOTE: Using the option above does not break FIPS compliance. However, note that longer passwords are good practice anyway. For example, passwords auto-generated by modern browsers match this requirement as they are longer than 14 characters. 
If you want to omit the option for max-padding-length, you can set the password policy to your realms to have passwords at least 14 characters long. NOTE: When you are migrating from {project_name} older than 24, or if you explicitly set the password policy to override the default hashing algorithm, it is possible that some of your users use an older -algorithm like `pbkdf2-sha256`. In this case, consider adding the `--spi-password-hashing-pbkdf2-sha256-max-padding-length=14` option to ensure that users having their passwords hashed with +algorithm like `pbkdf2-sha256`. In this case, consider adding the `+--spi-password-hashing--pbkdf2-sha256--max-padding-length=14+` option to ensure that users having their passwords hashed with the older `pbkdf2-sha256` can log in because their passwords may be shorter than 14 characters. * RSA keys of 1024 bits do not work (2048 is the minimum). This applies for keys used by the {project_name} realm itself (Realm keys from the `Keys` tab in the admin console), but also client keys and IDP keys diff --git a/docs/guides/server/hostname.adoc b/docs/guides/server/hostname.adoc index e214375e0ba1..92be53e57171 100644 --- a/docs/guides/server/hostname.adoc +++ b/docs/guides/server/hostname.adoc @@ -4,7 +4,7 @@ <@tmpl.guide title="Configuring the hostname (v2)" -summary="Learn how to configure the frontend and backchannel endpoints exposed by {project_name}." +summary="Configure the frontend and backchannel endpoints exposed by {project_name}." includedOptions="hostname hostname-* proxy" deniedCategories="hostname_v1"> @@ -152,7 +152,7 @@ As indicated in the previous sections, URLs can be resolved in several ways: the ** `hostname-backchannel-dynamic` must be set to false. ** `hostname-strict` must be set to false. * If `hostname-admin` is configured, `hostname` must be set to a URL (not just hostname). Otherwise {project_name} would not know what is the correct frontend URL (incl. port etc.) when accessing the Admin Console. -* If `hostname-backchannel-dynamic` is set to true, `hostname` must be set to a URL (not just hostname). Otherwise {project_name} would not know what is the correct frontend URL (incl. port etc.) when being access via the dynamically resolved bachchannel. +* If `hostname-backchannel-dynamic` is set to true, `hostname` must be set to a URL (not just hostname). Otherwise {project_name} would not know what is the correct frontend URL (incl. port etc.) when being access via the dynamically resolved backchannel. Additionally if hostname is configured, then hostname-strict is ignored. diff --git a/docs/guides/server/importExport.adoc b/docs/guides/server/importExport.adoc index 6d535baed04e..965fde242fc6 100644 --- a/docs/guides/server/importExport.adoc +++ b/docs/guides/server/importExport.adoc @@ -3,21 +3,20 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide - title="Importing and Exporting Realms" - summary="An overview about how to import and export realms"> + title="Importing and exporting realms" + summary="Import and export realms as JSON files."> In this {section}, you are going to understand the different approaches for importing and exporting realms using JSON files. [NOTE] ==== -Exporting and importing into single files can produce large files, so if your database contains more than 500 users, export to a directory and not a single file. -Using a directory performs better as the directory provider uses a separate transaction for each "page" (a file of users). -The default count of users per file and per transaction is fifty. 
-Increasing this to a larger number leads to an exponentially increasing execution time. -==== +Exporting and importing into single files can produce large files which may run the export / import process out of memory. If your database contains more than 50,000 users, export to a directory and not a single file. +The default count of users per file and per transaction is fifty, but you may use a much larger value if desired. + +The `import` and `export` commands are essentially server launches that exit before bringing up the full server. They are not currently designed to be run from the same machine as a running server instance, which may result in port or other conflicts. -All {project_name} nodes need to be stopped prior to using `kc.[sh|bat] import | export` commands. This ensures that the resulting operations will have no consistency issues with concurrent requests. -It also ensures that running an import or export command from the same machine as a server instance will not result in port or other conflicts. +It is recommended that all {project_name} nodes are stopped prior to using the `kc.[sh|bat] export` command. This ensures that the results will have no consistency issues with user or realm modifications during the export. +==== == Providing options for database connection parameters @@ -31,7 +30,7 @@ As default, {project_name} will re-build automatically for the `export` and `imp If you have built an optimized version of {project_name} with the `build` command as outlined in <@links.server id="configuration"/>, use the command line option `--optimized` to have {project_name} skip the build check for a faster startup time. When doing this, remove the build time options from the command line and keep only the runtime options. -NOTE: if you do not use `--optimized` keep in mind that an `import` or `export` command will implicitly create or update an optimized image for you - if you are running the command from the same machine as a server instance, this may impact the next start of your server. +NOTE: if you do not use `--optimized` keep in mind that an `import` or `export` command may implicitly create or update an optimized build for you - if you are running the command from the same machine as a server instance, this may impact the next start of your server. == Exporting a Realm to a Directory @@ -76,6 +75,12 @@ If you do not specify a specific realm to export, all realms are exported. To ex <@kc.export parameters="[--dir|--file] --realm my-realm"/> +== Import File Naming Conventions + +When you export a realm specific file name conventions are used, which must also be used for importing from a directory or import at startup. The realm file to be imported must be named -realm.json. +Regular and federated user files associated with a realm must be named -users-.json and -federated-users-.json. Failure to use this convention will result in errors or +user files not being imported. + == Importing a Realm from a Directory To import a realm, you can use the `import` command. Your {project_name} server instance must not be started when invoking this command. diff --git a/docs/guides/server/keycloak-truststore.adoc b/docs/guides/server/keycloak-truststore.adoc index 2064a3eaaa23..17c16daf8519 100644 --- a/docs/guides/server/keycloak-truststore.adoc +++ b/docs/guides/server/keycloak-truststore.adoc @@ -3,7 +3,7 @@ <@tmpl.guide title="Configuring trusted certificates" -summary="How to configure the {project_name} Truststore to communicate through TLS." 
+summary="Configure the {project_name} Truststore to communicate through TLS." includedOptions="truststore-paths tls-hostname-verifier"> When {project_name} communicates with external services or has an incoming connection through TLS, it has to validate the remote certificate in order to ensure it is connecting to a trusted server. This is necessary in order to prevent man-in-the-middle attacks. @@ -29,7 +29,7 @@ It is still possible to directly set your own `javax.net.ssl` truststore System You may refine how hostnames are verified by TLS connections with the `tls-hostname-verifier` property. * `DEFAULT` (the default) allows wildcards in subdomain names (e.g. *.foo.com) to match names with the same number of levels (e.g. a.foo.com, but not a.b.foo.com) - with rules and exclusions for public suffixes based upon https://publicsuffix.org/list/ -* `ANY` means that the hostname is not verified. +* `ANY` means that the hostname is not verified - this mode should not be used in production. * `WILDCARD` (deprecated) allows wildcards in subdomain names (e.g. *.foo.com) to match anything, including multiple levels (e.g. a.b.foo.com). Use DEFAULT instead. * `STRICT` (deprecated) allows wildcards in subdomain names (e.g. *.foo.com) to match names with the same number of levels (e.g. a.foo.com, but not a.b.foo.com) - with some limited exclusions. Use DEFAULT instead. + diff --git a/docs/guides/server/logging.adoc b/docs/guides/server/logging.adoc index 76a2df095896..57c396974d7e 100644 --- a/docs/guides/server/logging.adoc +++ b/docs/guides/server/logging.adoc @@ -6,8 +6,7 @@ <@tmpl.guide title="Configuring logging" -summary="Learn how to configure Logging" -includedOptions="log log-*"> +summary="Configure logging for {project_name}."> {project_name} uses the JBoss Logging framework. The following is a high-level overview for the available log handlers with the common parent log handler `root`: @@ -99,7 +98,7 @@ The available handlers are: The more specific handler configuration mentioned below will only take effect when the handler is added to this comma-separated list. -== Specify log level for each handler +=== Specify log level for each handler The `log-level` property specifies the global root log level and levels for selected categories. However, a more fine-grained approach for log levels is necessary to comply with the modern application requirements. @@ -118,7 +117,7 @@ More information in log handlers settings below. Only log levels specified in <> section are accepted, and *must be in lowercase*. There is no support for specifying particular categories for log handlers yet. -=== General principle +==== General principle It is necessary to understand that setting the log levels for each particular handler *does not override the root level* specified in the `log-level` property. Log handlers respect the root log level, which represents the maximal verbosity for the whole logging system. @@ -128,7 +127,7 @@ Specifically, when an arbitrary log level is defined for the handler, it does no In that case, the root `log-level` must also be assessed. Log handler levels provide the *restriction for the root log level*, and the default log level for log handlers is `all` - without any restriction. 
-=== Examples +==== Examples .Example: `debug` for file handler, but `info` for console handler: <@kc.start parameters="--log=console,file --log-level=debug --log-console-level=info"/> @@ -146,6 +145,88 @@ The root level must be set to the most verbose required level (`debug` in this c In order to see the `org.keycloak.events:trace`, the `trace` level must be set for the Syslog handler. +=== Use different JSON format for log handlers +Every log handler provides the ability to have structured log output in JSON format. +It can be enabled by properties in the format `log--output=json` (where `` is a log handler). + +If you need a different format of the produced JSON, you can leverage the following JSON output formats: + +* `default` (default) +* `ecs` + +The `ecs` value refers to the https://www.elastic.co/guide/en/ecs-logging/overview/current/intro.html[ECS] (Elastic Common Schema). + +ECS is an open-source, community-driven specification that defines a common set of fields to be used with Elastic solutions. +The ECS specification is being converged with https://opentelemetry.io/docs/concepts/semantic-conventions/[OpenTelemetry Semantic Conventions] with the goal of creating a single standard maintained by OpenTelemetry. + +In order to change the JSON output format, properties in the format `log--json-format` (where `` is a log handler) were introduced: + +* `log-console-json-format` - Console log handler +* `log-file-json-format` - File log handler +* `log-syslog-json-format` - Syslog log handler + +==== Example +If you want to have JSON logs in *ECS* (Elastic Common Schema) format for the console log handler, you can enter the following command: + +<@kc.start parameters="--log-console-output=json --log-console-json-format=ecs"/> + +.Example Log Message +[source,json] +---- +{"@timestamp":"2025-02-03T14:53:22.539484211+01:00","event.sequence":9608,"log.logger":"io.quarkus","log.level":"INFO","message":"Keycloak 999.0.0-SNAPSHOT on JVM (powered by Quarkus 3.17.8) started in 4.615s. Listening on: http://0.0.0.0:8080","process.thread.name":"main","process.thread.id":1,"mdc":{},"ndc":"","host.hostname":"host-name","process.name":"/usr/lib/jvm/jdk-21.0.3+9/bin/java","process.pid":77561,"data_stream.type":"logs","ecs.version":"1.12.2","service.environment":"prod","service.name":"Keycloak","service.version":"999.0.0-SNAPSHOT"} +---- + +=== Asynchronous logging +{project_name} supports asynchronous logging, which might be useful for deployments requiring **high throughput** and **low latency**. +Asynchronous logging uses a separate thread to take care of processing all log records. +The logging handlers are invoked in exactly the same way as with synchronous logging, only done in separate threads. +You can enable asynchronous logging for all {project_name} log handlers. +A dedicated thread will be created for every log handler with enabled asynchronous logging. + +The underlying mechanism for asynchronous logging uses a queue for processing log records. +Every new log record is added to the queue and then published to the particular log handler with enabled asynchronous logging. +Every log handler has a different queue. + +If the queue is already full, it blocks the main thread and waits for free space in the queue. 
+ +==== When to use asynchronous logging + +* You need **lower latencies** for incoming requests +* You need **higher throughput** +* You have **small worker thread pool** and want to offload logging to separate threads +* You want to reduce the impact of **I/O-heavy log handlers** +* You are logging to **remote destinations** (e.g., network syslog servers) and want to avoid blocking worker threads + +WARNING: Be aware that enabling asynchronous logging might bring some **additional memory overhead** due to the additional separate thread and the inner queue. +In that case, it is not recommended to use it for resource-constrained environments. +Additionally, unexpected server shutdowns create a risk of **losing log records**. + +==== Enable asynchronous logging +You can enable asynchronous logging globally for all log handlers by using `log-async` property as follows: + +<@kc.start parameters="--log-async=true"/> + +Or you can enable the asynchronous logging for every specific handler by using properties in the format `log--async` (where `` is a log handler). +If the property for a specific handler is not set, the value from the parent `log-async` property is used. + +You can use these properties as follows: + +<@kc.start parameters="--log-console-async=true --log-file-async=true --log-syslog-async=true"/> + +* `log-console-async` - Console log handler +* `log-file-async` - File log handler +* `log-syslog-async` - Syslog log handler + +==== Change queue length +You can change the size of the queue used for the asynchronous logging. +The default size is **512** log records in the queue. + +You can change the queue length as follows: + +<@kc.start parameters="--log-console-async-queue-length=512 --log-file-async-queue-length=512 --log-syslog-async-queue-length=512"/> + +These properties are available only when asynchronous logging is enabled for these specific log handlers. + == Console log handler The console log handler is enabled by default, providing unstructured log messages for the console. @@ -204,7 +285,7 @@ By default, the console log handler logs plain unstructured data to the console. .Example Log Message [source, json] ---- -{"timestamp":"2022-02-25T10:31:32.452+01:00","sequence":8442,"loggerClassName":"org.jboss.logging.Logger","loggerName":"io.quarkus","level":"INFO","message":"Keycloak 18.0.0-SNAPSHOT on JVM (powered by Quarkus 2.7.2.Final) started in 3.253s. Listening on: http://0.0.0.0:8080","threadName":"main","threadId":1,"mdc":{},"ndc":"","hostName":"host-name","processName":"QuarkusEntryPoint","processId":36946} +{"timestamp":"2025-02-03T14:52:20.290353085+01:00","sequence":9605,"loggerClassName":"org.jboss.logging.Logger","loggerName":"io.quarkus","level":"INFO","message":"Keycloak 999.0.0-SNAPSHOT on JVM (powered by Quarkus 3.17.8) started in 4.440s. Listening on: http://0.0.0.0:8080","threadName":"main","threadId":1,"mdc":{},"ndc":"","hostName":"host-name","processName":"/usr/lib/jvm/jdk-21.0.3+9/bin/java","processId":76944} ---- When using JSON output, colors are disabled and the format settings set by `--log-console-format` will not apply. @@ -216,7 +297,7 @@ To use unstructured logging, enter the following command: .Example Log Message [source] ---- -2022-03-02 10:36:50,603 INFO [io.quarkus] (main) Keycloak 18.0.0-SNAPSHOT on JVM (powered by Quarkus 2.7.2.Final) started in 3.615s. Listening on: http://0.0.0.0:8080 +2025-02-03 14:53:56,653 INFO [io.quarkus] (main) Keycloak 999.0.0-SNAPSHOT on JVM (powered by Quarkus 3.17.8) started in 4.795s. 
Listening on: http://0.0.0.0:8080 ---- === Colors @@ -308,6 +389,23 @@ To use UDP instead of TCP, add the `--log-syslog-protocol` option as follows: The available protocols are: `tpc`, `udp`, and `ssl-tcp`. +=== Configuring the Syslog counting framing + +By default, Syslog messages sent over TCP or SSL-TCP are prefixed with the message size, as required by certain Syslog receivers. +This behavior is controlled by the `--log-syslog-counting-framing` option. + +To explicitly enable or disable this feature, use the following command: + +<@kc.start parameters="--log-syslog-counting-framing=true"/> + +You can set the value to one of the following: + +* `protocol-dependent` (default) – Enable counting framing only when the `log-syslog-protocol` is `tcp` or `ssl-tcp`. +* `true` – Always enable counting framing by prefixing messages with their size. +* `false` – Never use counting framing. + +Note that using `protocol-dependent` ensures compatibility with most Syslog servers by enabling the prefix only when required by the protocol. + === Configuring the Syslog log format To set the logging format for a logged line, perform these steps: @@ -376,4 +474,17 @@ To use unstructured logging, enter the following command: As you can see, the timestamp is present twice, so you can amend it correspondingly via the `--log-syslog-format` property. +<@opts.printRelevantOptions includedOptions="log log-*" excludedOptions="log-console-* log-file log-file-* log-syslog-*"> + +=== Console +<@opts.includeOptions includedOptions="log-console-*"/> + +=== File +<@opts.includeOptions includedOptions="log-file log-file-*"/> + +=== Syslog +<@opts.includeOptions includedOptions="log-syslog-*"/> + + + diff --git a/docs/guides/server/management-interface.adoc b/docs/guides/server/management-interface.adoc index 5fd706592fea..8ee7815284c6 100644 --- a/docs/guides/server/management-interface.adoc +++ b/docs/guides/server/management-interface.adoc @@ -4,7 +4,7 @@ <@tmpl.guide title="Configuring the Management Interface" -summary="Learn how to configure {project_name}'s management interface for endpoints like metrics and health checks." +summary="Configure {project_name}'s management interface for endpoints such as metrics and health checks." includedOptions="http-management-* https-management-* legacy-observability-interface"> The management interface allows accessing management endpoints via a different HTTP server than the primary one. @@ -36,9 +36,11 @@ if you set the CLI option `--http-relative-path=/auth`, these endpoints are acce === TLS support -When the TLS is set for the default {project_name} server, the management interface will be accessible through HTTPS as well. +When the TLS is set for the default {project_name} server, by default the management interface will be accessible through HTTPS as well. The management interface can run only either on HTTP or HTTPS, not both as for the main server. +NOTE: If you do not want the management interface to use HTTPS, you may set the `http-management-scheme` option to `http`. + Specific {project_name} management interface options with the prefix `https-management-*` were provided for setting different TLS parameters for the management HTTP server. Their function is similar to their counterparts for the main HTTP server, for details see <@links.server id="enabletls" />. When these options are not explicitly set, the TLS parameters are inherited from the default HTTP server. 
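+
+As a short sketch of the behavior described above, the following command serves the main endpoints over HTTPS while keeping the management interface on plain HTTP via the `http-management-scheme` option. The keystore path and password are placeholders.
+
+[source,bash]
+----
+# Example only: main server on HTTPS, management interface on plain HTTP.
+bin/kc.sh start \
+  --https-key-store-file=/path/to/server.keystore \
+  --https-key-store-password=change_me \
+  --http-management-scheme=http
+----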
diff --git a/docs/guides/server/mutual-tls.adoc b/docs/guides/server/mutual-tls.adoc index 933e6011ee73..5f3f48296fdd 100644 --- a/docs/guides/server/mutual-tls.adoc +++ b/docs/guides/server/mutual-tls.adoc @@ -4,7 +4,7 @@ <@tmpl.guide title="Configuring trusted certificates for mTLS" -summary="Learn how to configure Mutual TLS to verify clients that are connecting to {project_name}." +summary="Configure Mutual TLS to verify clients that are connecting to {project_name}." includedOptions="https-trust-store-* https-client-auth https-management-client-auth"> In order to properly validate client certificates and enable certain authentication methods like two-way TLS or mTLS, you can set diff --git a/docs/guides/server/outgoinghttp.adoc b/docs/guides/server/outgoinghttp.adoc index e68105738af1..f7642c0ce4c3 100644 --- a/docs/guides/server/outgoinghttp.adoc +++ b/docs/guides/server/outgoinghttp.adoc @@ -4,7 +4,7 @@ <@tmpl.guide title="Configuring outgoing HTTP requests" -summary="How to configure the client used for outgoing HTTP requests." +summary="Configure the client used for outgoing HTTP requests." includedOptions="truststore-*"> {project_name} often needs to make requests to the applications and services that it secures. {project_name} manages these outgoing connections using an HTTP client. This {section} shows how to configure the client, connection pool, proxy environment settings, timeouts, and more. @@ -17,7 +17,7 @@ to configure a {project_name} Truststore so that {project_name} is able to perfo == Client Configuration Command The HTTP client that {project_name} uses for outgoing communication is highly configurable. To configure the {project_name} outgoing HTTP client, enter this command: -<@kc.start parameters="--spi-connections-http-client-default-="/> +<@kc.start parameters="--spi-connections-http-client--default--="/> The following are the command options: @@ -90,7 +90,7 @@ For example, consider the following regex: You apply a regex-based hostname pattern by entering this command: -<@kc.start parameters="--spi-connections-http-client-default-proxy-mappings=\'.*\\\\.(google|googleapis)\\\\.com;http://www-proxy.acme.com:8080\'"/> +<@kc.start parameters="--spi-connections-http-client--default--proxy-mappings=\'.*\\\\.(google|googleapis)\\\\.com;http://www-proxy.acme.com:8080\'"/> The backslash character `\` is escaped again because micro-profile config is used to parse the array of mappings. diff --git a/docs/guides/server/pinned-guides b/docs/guides/server/pinned-guides index ad50f334494e..da8503aaffb2 100644 --- a/docs/guides/server/pinned-guides +++ b/docs/guides/server/pinned-guides @@ -19,4 +19,5 @@ management-interface importExport vault all-config -all-provider-config \ No newline at end of file +all-provider-config +update-compatibility \ No newline at end of file diff --git a/docs/guides/server/reverseproxy.adoc b/docs/guides/server/reverseproxy.adoc index 4d47306cdece..71978efa1be7 100644 --- a/docs/guides/server/reverseproxy.adoc +++ b/docs/guides/server/reverseproxy.adoc @@ -4,8 +4,8 @@ <#import "/templates/links.adoc" as links> <@tmpl.guide -title="Using a reverse proxy" -summary="Learn how to configure {project_name} together with a reverse proxy, api gateway, or load balancer." +title="Configuring a reverse proxy" +summary="Configure {project_name} with a reverse proxy, API gateway, or load balancer." includedOptions="proxy-* hostname hostname-admin http-relative-path"> Distributed environments frequently require the use of a reverse proxy. 
{project_name} offers several options to securely integrate with such environments. @@ -22,13 +22,13 @@ The port `8443` (or `8080` if HTTP is enabled) is used for the Admin UI, Account The port `9000` is used for management, which includes endpoints for health checks and metrics as described in the <@links.server id="management-interface"/> {section}. -You only need to proxy port `8443` (or `8080`) even when you use different host names for frontend/backend and administration as described at <@links.server id="configure-production"/>. You should not proxy port `9000` as health checks and metrics use those ports directly, and you do not want to expose this information to external callers. +You only need to proxy port `8443` (or `8080`) even when you use different host names for frontend/backend and administration as described at <@links.server id="configuration-production"/>. You should not proxy port `9000` as health checks and metrics use those ports directly, and you do not want to expose this information to external callers. == Configure the reverse proxy headers {project_name} will parse the reverse proxy headers based on the `proxy-headers` option which accepts several values: -* By default if the option is not specified, no reverse proxy headers are parsed. +* By default if the option is not specified, no reverse proxy headers are parsed. This should be used when no proxy is in use or with https passthrough. * `forwarded` enables parsing of the `Forwarded` header as per https://www.rfc-editor.org/rfc/rfc7239.html[RFC7239]. * `xforwarded` enables parsing of non-standard `X-Forwarded-*` headers, such as `X-Forwarded-For`, `X-Forwarded-Proto`, `X-Forwarded-Host`, and `X-Forwarded-Port`. @@ -38,7 +38,7 @@ For example: <@kc.start parameters="--proxy-headers forwarded"/> -WARNING: If either `forwarded` or `xforwarded` is selected, make sure your reverse proxy properly sets and overwrites the `Forwarded` or `X-Forwarded-*` headers respectively. To set these headers, consult the documentation for your reverse proxy. Misconfiguration will leave {project_name} exposed to security vulnerabilities. +WARNING: If either `forwarded` or `xforwarded` is selected, make sure your reverse proxy properly sets and overwrites the `Forwarded` or `X-Forwarded-*` headers respectively. To set these headers, consult the documentation for your reverse proxy. Do not use `forwarded` or `xforwarded` with https passthrough. Misconfiguration will leave {project_name} exposed to security vulnerabilities. Take extra precautions to ensure that the client address is properly set by your reverse proxy via the `Forwarded` or `X-Forwarded-For` headers. If this header is incorrectly configured, rogue clients can set this header and trick {project_name} into thinking the client is connected from a different IP address than the actual address. This precaution can be more critical if you do any deny or allow listing of IP addresses. @@ -84,12 +84,12 @@ From this point, it is beneficial if load balancer forwards all the next request The sticky session is not mandatory for the cluster setup, however it is good for performance for the reasons mentioned above. You need to configure your loadbalancer to stick over the AUTH_SESSION_ID cookie. The appropriate procedure to make this change depends on your loadbalancer. 
-If your proxy supports session affinity without processing cookies from backend nodes, you should set the `spi-sticky-session-encoder-infinispan-should-attach-route` option +If your proxy supports session affinity without processing cookies from backend nodes, you should set the `+spi-sticky-session-encoder--infinispan--should-attach-route+` option to `false` in order to avoid attaching the node to cookies and just rely on the reverse proxy capabilities. -<@kc.start parameters="--spi-sticky-session-encoder-infinispan-should-attach-route=false"/> +<@kc.start parameters="--spi-sticky-session-encoder--infinispan--should-attach-route=false"/> -By default, the `spi-sticky-session-encoder-infinispan-should-attach-route` option value is `true` so that the node name is attached to +By default, the `+spi-sticky-session-encoder--infinispan--should-attach-route+` option value is `true` so that the node name is attached to cookies to indicate to the reverse proxy the node that subsequent requests should be sent to. == Exposed path recommendations @@ -146,7 +146,7 @@ For example: == PROXY Protocol -The `proxy-protocol-enabled` option controls whether the server should use the HA PROXY protocol when serving requests from behind a proxy. When set to true, the remote address returned will be the one from the actual connecting client. +The `proxy-protocol-enabled` option controls whether the server should use the HA PROXY protocol when serving requests from behind a proxy. When set to true, the remote address returned will be the one from the actual connecting client. The value cannot be `true` when using the `proxy-headers` option. This is useful when running behind a compatible https passthrough proxy because the request headers cannot be manipulated. @@ -169,10 +169,9 @@ Client certificate lookup via a proxy header for X.509 authentication is conside and edge TLS termination. * If passthrough is not an option, implement the following security measures: ** Configure your network so that {project_name} is isolated and can accept connections only from the proxy. -** Make sure that the proxy overwrites the header that is configured in `spi-x509cert-lookup--ssl-client-cert` option. -** Keep in mind that any of the `spi-x509cert-*` options don't reflect the `proxy-trusted-addresses` option. -** Pay extra attention to the `spi-x509cert-lookup--trust-proxy-verification` setting. Make sure you enable it only if you can trust your proxy to verify the client certificate. - Setting `spi-x509cert-lookup--trust-proxy-verification=true` without the proxy verifying the client certificate chain will expose {project_name} to security vulnerability +** Make sure that the proxy overwrites the header that is configured in `spi-x509cert-lookup----ssl-client-cert` option. +** Pay extra attention to the `spi-x509cert-lookup----trust-proxy-verification` setting. Make sure you enable it only if you can trust your proxy to verify the client certificate. + Setting `spi-x509cert-lookup----trust-proxy-verification=true` without the proxy verifying the client certificate chain will expose {project_name} to security vulnerability when a forged client certificate can be used for authentication. 
====
 
@@ -195,10 +194,10 @@ The server supports some of the most commons TLS termination proxies such as:
 To configure how client certificates are retrieved from the requests you need to:
 
 .Enable the corresponding proxy provider
-<@kc.build parameters="--spi-x509cert-lookup-provider=<provider>"/>
+<@kc.build parameters="--spi-x509cert-lookup--provider=<provider>"/>
 
 .Configure the HTTP headers
-<@kc.start parameters="--spi-x509cert-lookup-<provider>-ssl-client-cert=SSL_CLIENT_CERT --spi-x509cert-lookup-<provider>-ssl-cert-chain-prefix=CERT_CHAIN --spi-x509cert-lookup-<provider>-certificate-chain-length=10"/>
+<@kc.start parameters="--spi-x509cert-lookup--<provider>--ssl-client-cert=SSL_CLIENT_CERT --spi-x509cert-lookup--<provider>--ssl-cert-chain-prefix=CERT_CHAIN --spi-x509cert-lookup--<provider>--certificate-chain-length=10"/>
 
 When configuring the HTTP headers, you need to make sure the values you are using correspond to the name of the headers forwarded by the proxy with the client certificate information.
 
@@ -222,6 +221,9 @@ to load additional certificates from headers `CERT_CHAIN_0` to `CERT_CHAIN_9` if
 
 |trust-proxy-verification
 | Enable trusting NGINX proxy certificate verification, instead of forwarding the certificate to {project_name} and verifying it in {project_name}.
+
+|cert-is-url-encoded
+| Whether the forwarded certificate is URL-encoded or not. In NGINX, this corresponds to the `$ssl_client_cert` and `$ssl_client_escaped_cert` variables. This can also be used for the Traefik PassTlsClientCert middleware, as it sends the client certificate unencoded.
 |===
 
 === Configuring the NGINX provider
diff --git a/docs/guides/server/update-compatibility.adoc b/docs/guides/server/update-compatibility.adoc
new file mode 100644
index 000000000000..2913be824455
--- /dev/null
+++ b/docs/guides/server/update-compatibility.adoc
@@ -0,0 +1,179 @@
+<#import "/templates/guide.adoc" as tmpl>
+<#import "/templates/kc.adoc" as kc>
+<#import "/templates/links.adoc" as links>
+
+<@tmpl.guide
+title="Checking if rolling updates are possible"
+summary="Execute the update compatibility command to check if {project_name} supports a rolling update for a change in your deployment."
+>
+
+Use the update compatibility command to determine if you can update your deployment with a rolling update strategy when enabling or disabling features, or when changing the {project_name} version, configuration, providers, or themes.
+The outcome shows whether a rolling update is possible or if a recreate update is required.
+
+In its current version, the command reports that a rolling update is possible when the old and the new deployment use the same {project_name} version.
+Future versions of {project_name} might change that behavior to use additional information from the configuration, the image and the version to determine if a rolling update is possible.
+
+[NOTE]
+====
+With the next iteration of this feature, a rolling update strategy can also be used when updating to the next patch release of {project_name}.
+Refer to the <<rolling-updates-for-patch-releases>> section for more details.
+====
+
+The command is fully scriptable, so your update procedure can use its result to perform a rolling or recreate strategy depending on the change performed.
+It is also GitOps friendly, as it allows storing the metadata of the previous configuration in a file. Use this file in a CI/CD pipeline with the new configuration to determine if a rolling update is possible or if a recreate update is needed.
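+
+As a minimal sketch of such a pipeline step (the metadata file name is only an example, and the exit-code handling relies on the codes documented later in this {section}):
+
+[source,bash]
+----
+# Generated earlier, against the old configuration and version:
+#   bin/kc.sh update-compatibility metadata --file=metadata.json
+
+# Run against the new configuration and version:
+bin/kc.sh update-compatibility check --file=metadata.json
+case $? in
+  0)   echo "Rolling update is possible" ;;
+  3|4) echo "Recreate update is required" ;;
+  *)   echo "Compatibility check failed; inspect the output" >&2; exit 1 ;;
+esac
+----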
+
+If you are using the {project_name} Operator, continue to the <@links.operator id="rolling-updates" /> {section} and the `Auto` strategy for more information.
+
+== Supported update strategies
+
+Rolling Update::
+In this guide, a rolling update is an update that can be performed with zero downtime, provided your deployment consists of at least two nodes.
+Update your {project_name} nodes one by one: shut down one of the nodes running the old version and start a node running the new version in its place.
+Wait until the new node's start-up probe returns success before proceeding to the next {project_name} node. See the <@links.observability id="health"/> {section} for details on how to enable and use the start-up probe.
+
+Recreate Update::
+A recreate update is not compatible with zero downtime and requires downtime to be applied.
+Shut down all nodes of the cluster running the old version before starting the nodes with the new version.
+
+== Determining the update strategy for an updated configuration
+
+To determine if a rolling update is possible:
+
+1. Run the update compatibility command to generate the required metadata with the old configuration.
+2. Check the metadata with the new configuration to determine the update strategy.
+
+NOTE: If you do not use `--optimized`, keep in mind that an `update` command may implicitly create or update an optimized build for you. If you are running the command from the same machine as a server instance, this may impact the next start of your server.
+
+[WARNING]
+====
+The `check` command currently offers only limited functionality. At the moment, it takes into consideration only the version of {project_name} and the embedded Infinispan to determine if a rolling update is possible.
+If those are unchanged, it reports that a rolling update is possible.
+
+The current version does not yet verify configuration changes and assumes all configuration changes are eligible for a rolling update.
+The same applies to changes to custom extensions and themes.
+
+A good use case for this is, for example, when you want to perform a rolling update after changing the {project_name} theme or your custom extensions, and only want to perform a recreate update when the {project_name} version changes, which does not yet allow a rolling update.
+
+While consumers of these commands should know the limitations that exist today, they should not rely on the internal behavior or the structure of the metadata file.
+Instead, they should rely only on the exit code of the `check` command to benefit from future enhancements to the internal logic that determines when a rolling update is possible.
+====
+
+=== Generating the Metadata
+
+To generate the metadata, execute the following command using the same {project_name} version and configuration options:
+
+.Generate and save the metadata from the current deployment.
+<@kc.updatecompatibility parameters="metadata --file=/path/to/file.json"/>
+
+This command accepts all options used by the `start` command.
+The command displays the metadata, in JSON format, in the console for debugging purposes.
+The `--file` parameter allows you to save the metadata to a file.
+Use this file with the subsequent `check` command.
+
+[WARNING]
+====
+Ensure that all configuration options, whether set via environment variables or CLI arguments, are included when running the above command.
+
+Omitting any configuration options results in incomplete metadata and could lead to an incorrect result in the next step.
+====
+
+=== Checking the Metadata
+
+This command checks the metadata generated by the previous command and compares it with the current configuration and {project_name} version.
+If you are updating to a new {project_name} version, this command must be executed with the new version.
+
+.Check the metadata from a previous deployment.
+<@kc.updatecompatibility parameters="check --file=/path/to/file.json"/>
+
+[WARNING]
+====
+* Ensure that all configuration options, whether set via environment variables or CLI arguments, are included when running this command.
+
+* Verify that the correct {project_name} version is used.
+
+Failure to meet these requirements results in an incorrect outcome.
+====
+
+The command prints the result to the console.
+For example, if a rolling update is possible, it displays:
+
+.Rolling Update possible message
+[source,bash]
+----
+[OK] Rolling Update is available.
+----
+
+If no rolling update is possible, the command provides details about the incompatibility:
+
+.Rolling Update not possible message
+[source,bash]
+----
+[keycloak] Rolling Update is not available. 'keycloak.version' is incompatible: 26.2.0 -> 26.2.1 #<1>
+----
+<1> In this example, the Keycloak version `26.2.0` is not compatible with version `26.2.1` and a rolling update is not possible.
+
+[NOTE]
+====
+With the next iteration of this feature, a rolling update strategy can also be used when updating to the next patch release of {project_name}.
+Refer to the <<rolling-updates-for-patch-releases>> section for more details.
+====
+
+*Command exit code*
+
+Use the command's exit code to determine the update type in your automation pipeline:
+
+|===
+|Exit Code |Description
+
+m|0
+|Rolling Update is possible.
+
+m|1
+|An unexpected error occurred (for example, the metadata file is missing or corrupted).
+
+m|2
+|Invalid CLI option.
+
+m|3
+|Rolling Update is not possible.
+The deployment must be shut down before applying the new configuration.
+
+m|4
+|Rolling Update is not possible.
+The feature `rolling-updates` is disabled.
+|===
+
+
+[[rolling-updates-for-patch-releases]]
+== Rolling updates for patch releases
+
+WARNING: This behavior is currently in preview mode, and it is not recommended for use in production.
+
+It is possible to configure the {project_name} update compatibility command to allow rolling updates when upgrading to a newer patch version in the same `+major.minor+` release stream.
+
+To enable this behavior for the compatibility `check` command, enable the `rolling-updates:v2` feature as shown in the following example.
+<@kc.updatecompatibility parameters="check --file=/path/to/file.json --features=rolling-updates:v2"/>
+
+Note that no change is needed when generating metadata using the `metadata` command.
+
+Recommended Configuration:
+
+* Enable sticky sessions in your loadbalancer to avoid users bouncing between different versions of {project_name}, as this could result in users needing to refresh their Account Console and Admin UI multiple times while the upgrade is in progress.
+
+Supported functionality during rolling updates:
+
+* Users can log in and log out for OpenID Connect clients.
+
+* OpenID Connect clients can perform all operations, for example, refreshing tokens and querying the user info endpoint.
+
+Known limitations:
+
+* If there have been changes to the Account Console or Admin UI in the patch release, and the user opened the Account Console or Admin UI before or during the upgrade, the user might see an error message and be asked to reload the application while navigating in the browser during or after the upgrade.
+ +* If the two patch releases of {project_name} use different versions of the embedded Infinispan, no rolling update of {project_name} be performed. + +== Further reading + +The {project_name} Operator uses the functionality described above to determine if a rolling update is possible. See the <@links.operator id="rolling-updates" /> {section} and the `Auto` strategy for more information. + + diff --git a/docs/guides/server/vault.adoc b/docs/guides/server/vault.adoc index 91ea96b47409..1523eacf7a10 100644 --- a/docs/guides/server/vault.adoc +++ b/docs/guides/server/vault.adoc @@ -3,8 +3,7 @@ <@tmpl.guide title="Using a vault" -summary="Learn how to use and configure a vault in {project_name}" -priority=30 +summary="Configure and use a vault in {project_name}." includedOptions="vault vault-*"> {project_name} provides two out-of-the-box implementations of the Vault SPI: a plain-text file-based vault and Java KeyStore-based vault. diff --git a/docs/guides/templates/guide.adoc b/docs/guides/templates/guide.adoc index 0fb9c840cbc1..95e54aad7c0f 100644 --- a/docs/guides/templates/guide.adoc +++ b/docs/guides/templates/guide.adoc @@ -26,8 +26,6 @@ endif::[] <#nested> <#if includedOptions?has_content> -== Relevant options - -<@opts.list options=ctx.options.getOptions(includedOptions, excludedOptions, deniedCategories) anchor=false> +<@opts.printRelevantOptions includedOptions=includedOptions excludedOptions=excludedOptions deniedCategories=deniedCategories> diff --git a/docs/guides/templates/kc.adoc b/docs/guides/templates/kc.adoc index 826dcd1571cd..8db47c0e08de 100644 --- a/docs/guides/templates/kc.adoc +++ b/docs/guides/templates/kc.adoc @@ -46,3 +46,10 @@ bin/kc.[sh|bat] import ${parameters} bin/kc.[sh|bat] bootstrap-admin ${parameters} ---- + +<#macro updatecompatibility parameters> +[source,bash] +---- +bin/kc.[sh|bat] update-compatibility ${parameters} +---- + diff --git a/docs/guides/templates/options.adoc b/docs/guides/templates/options.adoc index d73ccf7ceb28..e53dbd5f768a 100644 --- a/docs/guides/templates/options.adoc +++ b/docs/guides/templates/options.adoc @@ -5,6 +5,17 @@ +<#macro includeOptions includedOptions excludedOptions="" deniedCategories=""> +<@list options=ctx.options.getOptions(includedOptions, excludedOptions, deniedCategories) anchor=false> + + +<#macro printRelevantOptions includedOptions excludedOptions="" deniedCategories=""> +== Relevant options + +<@includeOptions includedOptions=includedOptions excludedOptions=excludedOptions deniedCategories=deniedCategories> +<#nested> + + <#macro list options buildIcon=true anchor=true> [cols="12a,4",role="options"] |=== @@ -12,7 +23,7 @@ <#list options as option> | -[.options-key]#``${option.key}``# <#if buildIcon><#if option.build>[.none]#icon:tools[role=options-build]# +[.options-key]#`+${option.key}+`# <#if buildIcon><#if option.build>[.none]#icon:tools[role=options-build]# [.options-description]#${option.description}# @@ -20,8 +31,8 @@ -- <#if option.descriptionExtended?has_content>[.options-description-extended]#${option.descriptionExtended!}# -*CLI:* `${option.keyCli}` + -*Env:* `${option.keyEnv}` +*CLI:* `+${option.keyCli}+` + +*Env:* `+${option.keyEnv}+` -- <#if option.enabledWhen?has_content> diff --git a/docs/guides/ui-customization/avatars.adoc b/docs/guides/ui-customization/avatars.adoc new file mode 100644 index 000000000000..1709323c5a2b --- /dev/null +++ b/docs/guides/ui-customization/avatars.adoc @@ -0,0 +1,29 @@ +<#import "/templates/guide.adoc" as tmpl> +<#import "/templates/links.adoc" as links> + 
+<@tmpl.guide +title="Using Avatars" +priority=50 +summary="Use avatars in the Admin console and Account console."> + +Both the admin and account consoles allow use of an avatar to personalize the user experience. {project_name} supports avatars using the oidc standard `picture` claim. + +This `picture` claim should have a URI as its value. The URI should point to the avatar meant to be displayed in the masthead section of the admin console or account console. + +== Setting a picture attribute from the admin console +The simplest way to allow users to specify the avatar URI is to add a `picture` attribute in User profile. Just go to the admin console and navigate to the `Realm Settings -> User profile` tab. + +Here is an example setup of the `picture` attribute: + +image::ui-customization/picture-attribute-general-settings.png[caption="",title="User profile picture attribute setup in admin console"] + +== Avatar success +Here is the result in account console once the URI is saved for the picture attribute: + +image::ui-customization/account-console-with-avatar.png[caption="",title="Avatar shown in account console"] + +== Important warning +CAUTION: Allowing users to specify their own URI could lead to security concerns. An avatar can contain malware. So ensure that images come from a trusted source. One practical approach is to make sure that the `picture` has a regular expression validator to restrict the URI. + +image::ui-customization/avatar-validation.png[caption="",title="RegExp validator for User profile picture attribute"] + \ No newline at end of file diff --git a/docs/guides/ui-customization/creating-your-own-console.adoc b/docs/guides/ui-customization/creating-your-own-console.adoc new file mode 100644 index 000000000000..336595cbcdb9 --- /dev/null +++ b/docs/guides/ui-customization/creating-your-own-console.adoc @@ -0,0 +1,200 @@ +<#import "/templates/guide.adoc" as tmpl> +<#import "/templates/links.adoc" as links> + +<@tmpl.guide +title="Creating your own Console" +priority=70 +summary="Learn to create your own version of Admin Console or Account Console."> + +The Admin Console and Account Console are based on React. +To create your own version of these consoles, you can use these React based npm packages: + +* `@keycloak/keycloak-admin-ui`: This is the base theme for the Admin Console. +* `@keycloak/keycloak-account-ui`: This is the base theme for the Account Console. + +Both packages are available in the public npm repository. + +The goal of these packages is to allow the developer to create new React-based consoles that will continue to function properly with new versions of {project_name}. + +Additionally, {project_name} provides a tool called `create-keycloak-theme` that helps you get started. Full documentation for this tool can be found https://github.com/keycloak/keycloak/tree/main/js/apps/create-keycloak-theme[here]. + +== Getting started + +First, we install the `create-keycloak-theme` tool. Note that we are using `pnpm`, but plain `npm` should work as well. +[source,bash] +---- +pnpm install -g create-keycloak-theme +---- + +Next, we use the tool to create a full build environment for our new console. In this example, we create a new Account Console, but everything that follows applies to creation of an Admin Console. You would just change the `-t` parameter in the following command. +[source,bash] +---- +pnpm create keycloak-theme my-theme -t account +---- + +Next, we install npm libraries. 
+[source,bash]
+----
+cd my-theme
+pnpm install
+----
+
+Now, we start up the development environment, which uses https://vite.dev/guide/[Vite]. This will allow code changes to immediately show up in your browser.
+[source,bash]
+----
+pnpm run dev
+----
+
+Then we start a {project_name} server. It will be automatically downloaded, started, and linked to the development environment.
+[source,bash]
+----
+pnpm run start-keycloak
+----
+
+Now everything is set up and we are ready to start hacking. To see a customized version of the Account Console, point your browser to http://localhost:8080/realms/master/account.
+
+The user name and password are both `admin`. Once logged in, you will see the new Account Console rendered:
+
+image::ui-customization/custom-account-console.png[title="Custom Account Console"]
+
+[NOTE]
+If you need to use the Admin Console, go to http://localhost:8080
+
+== Adding a new page
+
+The first thing we want to do is add a new page to our custom Account Console. You will see that there is already a template for this, `/src/MyPage.tsx`.
+
+image::ui-customization/myPage.png[title="MyPage.tsx"]
+
+There are a few parts of the code to take note of. First, there is the import statement starting with line 1:
+[source,javascript]
+----
+import {
+  AccountEnvironment,
+  Page,
+  UserRepresentation,
+  getPersonalInfo,
+  savePersonalInfo,
+  useAlerts,
+  useEnvironment,
+  usePromise,
+} from "@keycloak/keycloak-account-ui";
+----
+
+These imports of components and functions are from the aforementioned library `@keycloak/keycloak-account-ui`. This library will stay up to date with new versions of {project_name}. Therefore, you can rely on this library across releases and not worry that your version of the Account Console will break.
+
+For a full list of the exports from this library, see the https://github.com/keycloak/keycloak/blob/main/js/apps/account-ui/src/index.ts[source code].
+
+Incidentally, if you are instead creating an Admin Console, see this https://github.com/keycloak/keycloak/blob/main/js/apps/admin-ui/src/index.ts[source code].
+
+You may notice that there is already a link in the lefthand navigation for `MyPage.tsx`, which shows up as `myPage`. If you click on that link, you will see `MyPage.tsx` in action. This was accomplished in the `routes.tsx` file.
+
+For `myPage`, the important parts in `routes.tsx` are as follows:
+[source,javascript]
+----
+import { MyPage } from "./MyPage";
+----
+[source,javascript,subs="specialcharacters,quotes"]
+----
+export const **MyPageRoute**: RouteObject = {
+  path: "myPage",
+  element: <MyPage />,
+};
+
+export const RootRoute: RouteObject = {
+  path: decodeURIComponent(new URL(environment.baseUrl).pathname),
+  element: <Root />,
+  errorElement: <>Error</>,
+  children: [
+    PersonalInfoRoute,
+    DeviceActivityRoute,
+    LinkedAccountsRoute,
+    SigningInRoute,
+    ApplicationsRoute,
+    GroupsRoute,
+    ResourcesRoute,
+    **MyPageRoute**,
+  ],
+};
+----
+
+The last thing to notice is that in your development environment, the navigation link to `MyPage.tsx` is rendered as "myPage". We would rather see this rendered as "My Page". The good news is that when you do the production build of your custom Account Console, it will be properly rendered. `myPage` is a localization key that will be resolved to the proper language at runtime. To see the keys and values for English, look in `\maven-resources\theme\my-account\account\messages\messages_en.properties`.
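+
+As a small sketch of customizing that label (the path is the one from the project template, written with forward slashes; the key name comes from the route above):
+
+[source,bash]
+----
+# Change the English label that replaces the myPage key in the production build
+sed -i 's/^myPage=.*/myPage=My Page/' \
+  maven-resources/theme/my-account/account/messages/messages_en.properties
+----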
+ +== Modifying an existing page + +You may wish to modify one of the Account Console pages that already exists in {project_name}. One approach is to simply re-create the entire page yourself using the method in the previous section. + +But you may instead wish to start with the source code from the Keycloak project. If that is your choice, you can find the source code for all the pages in the Keycloak GitHub project https://github.com/keycloak/keycloak/tree/main/js/apps/account-ui/src[here]. + +As an example, we will use the Device Activity page (`DeviceActivity.tsx`). Let's assume that we want to remove the details from each device. We start with a screen that looks like this: + +image::ui-customization/device-activity-before.png[title="We want to remove the indicated details from DeviceActivity.tsx"] + +We will need to modify the original source code and replace the page found in `@keycloak/keycloak-account-ui`. + +First, download DeviceActivity.tsx from https://github.com/keycloak/keycloak/blob/main/js/apps/account-ui/src/account-security/DeviceActivity.tsx[GitHub]. + +Then remove the details block from template and remove any unused imports. + +In addition to this, there are some other changes you will need to make in order to allow your new page to work properly with your custom Account Console. + +These imports will now come from the library `@keycloak/keycloak-account-ui`: + +[source,javascript,subs="specialcharacters,quotes"] +---- +import { deleteSession, getDevices } from "../api/methods"; +import { + ClientRepresentation, + DeviceRepresentation, + SessionRepresentation, +} from "../api/representations";*/ +import { Page } from "../components/page/Page"; +---- + +Here is the new import statement: +[source,javascript] +---- +import { + AccountEnvironment, + Page, + usePromise, + DeviceRepresentation, + SessionRepresentation, + deleteSession, + getDevices, + useAlerts, + useEnvironment, // <---- moved from ui-shared +} from "@keycloak/keycloak-account-ui"; +---- + +Note that the `useEnvironment' function is also added to this import. + +The last problem to take care of is the PatternFly icons. These icons are not referenced anywhere else in your project. So you will need to add this as a dependency. + +[source,bash] +---- +pnpm install @patternfly/react-icons +---- + +Finally, we need to change `routes.tsx` in order to replace the old Device Activity page with our new version. This is just a matter of removing the `DeviceActivity` import from `@keycloak/keycloak-account-ui` and adding an import that points to our new source file: + +[source,javascript] +---- +import { DeviceActivity } from "./DeviceActivity"; +---- + +Once all this is complete, your new Device Activity page will look like this: + +image::ui-customization/device-activity-after.png[title="DeviceActivity.tsx with details removed"] + +== Deploying your custom console + +The last step is to build and deploy your custom console. For this, you may need to https://maven.apache.org/[install Maven]. With Maven installed, run: + +[source,bash] +---- +mvn package +---- + +If the build is successful, you will see a jar created in the `/target` directory. This jar can now be deployed to production by copying it to the `/providers` directory of the {project_name} server. 
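+
+As a concrete sketch of that deployment step (the jar name and the {project_name} installation directory are placeholders for your own values):
+
+[source,bash]
+----
+# Copy the packaged console into the server's providers directory
+cp target/my-theme.jar /opt/keycloak/providers/
+
+# Rebuild the server configuration so the new provider is picked up, then restart
+/opt/keycloak/bin/kc.sh build
+----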
+ \ No newline at end of file diff --git a/docs/guides/ui-customization/index.adoc b/docs/guides/ui-customization/index.adoc new file mode 100644 index 000000000000..57954bd13407 --- /dev/null +++ b/docs/guides/ui-customization/index.adoc @@ -0,0 +1,12 @@ += Keycloak UI customization guide + +include::../attributes.adoc[] + +<#list ctx.guides as guide> +:links_ui_customization_${guide.id}_name: ${guide.title} +:links_ui_customization_${guide.id}_url: #${guide.id} + + +<#list ctx.guides as guide> +include::${guide.template}[leveloffset=+1] + diff --git a/docs/guides/ui-customization/introduction.adoc b/docs/guides/ui-customization/introduction.adoc new file mode 100644 index 000000000000..837e18d29522 --- /dev/null +++ b/docs/guides/ui-customization/introduction.adoc @@ -0,0 +1,14 @@ +<#import "/templates/guide.adoc" as tmpl> +<#import "/templates/links.adoc" as links> + +<@tmpl.guide +title="Introduction" +priority=10 +summary="Learn how to customize the user interfaces."> + +{project_name} includes user interfaces (UIs) for the login page, Admin Console, and Account Console. It also has a "welcome" screen used for the first administrator of the Admin Console. + +You can customize, extend, and modify these UIs to accommodate many use cases on your production systems. At a minimum, you will need to change the logos and colors to match your corporate identity. Also, you may modify the UIs to implement new functionality. + +This guide provides all the information needed to customize the {project_name} UIs for your needs. + \ No newline at end of file diff --git a/docs/guides/ui-customization/localization.adoc b/docs/guides/ui-customization/localization.adoc new file mode 100644 index 000000000000..a31c0052cf94 --- /dev/null +++ b/docs/guides/ui-customization/localization.adoc @@ -0,0 +1,100 @@ +<#import "/templates/guide.adoc" as tmpl> +<#import "/templates/links.adoc" as links> + +<@tmpl.guide +title="Localization" +priority=40 +summary="Learn how to localize strings in the UIs."> + +{project_name} supports localized text throughout all user interfaces. + +.Prerequisites + +* You enable internationalization for a realm in the `Realm settings` section of the Admin Console. See {adminguide_link}#enabling-internationalization[Enabling internationalization]. + +== Localizing messages in a theme + +Text in the templates is loaded from message bundles. When a theme extends another theme, the child theme inherits all messages from the parent's message bundle. You can +override individual messages by adding `/messages/messages_en.properties` to your theme. + +For example, to replace `Username` on the login form with `Your Username` for the `mytheme`, create the file +`themes/mytheme/login/messages/messages_en.properties` with the following content: + +[source] +---- +usernameOrEmail=Your Username +---- + +Within a message, values like `{0}` and `{1}` are replaced with arguments when the message is used. For example {0} in `Log in to {0}` is replaced with the name +of the realm. + +Texts of these message bundles can be overwritten by realm-specific values, which are manageable by the UI and API. + +== Adding a language to a theme + +.Procedure + +. Create the file `/messages/messages_.properties` in the directory of your theme. + +. Add this file to the `locales` property in `/theme.properties`. +For a language to be available to users in a realm, the login, account, and email theme types must support the language, so you need to add your language for those theme types. 
++ +For example, to add Norwegian translations to the `mytheme` theme, create the file `themes/mytheme/login/messages/messages_no.properties` with the +following content: ++ +[source] +---- +usernameOrEmail=Brukernavn +password=Passord +---- ++ +If you omit a translation for messages, those messages appear in English. + +. Edit `themes/mytheme/login/theme.properties` and add: ++ +[source] +---- +locales=en,no +---- + +. Make the same changes to the `account` and `email` theme types. To make these changes, create `themes/mytheme/account/messages/messages_no.properties` and +`themes/mytheme/email/messages/messages_no.properties`. If you leave these files empty, English messages appear. + +. Copy `themes/mytheme/login/theme.properties` to `themes/mytheme/account/theme.properties` and `themes/mytheme/email/theme.properties`. + +. Add a translation for the language selector by adding a message to the English translation. Add the following to +`themes/mytheme/account/messages/messages_en.properties` and `themes/mytheme/login/messages/messages_en.properties`: ++ +[source] +---- +locale_no=Norsk +---- + +By default, message properties files should be encoded using UTF-8. +{project_name} falls back to ISO-8859-1 handling if it cannot read the contents as UTF-8. +Unicode characters can be escaped as described in Java's documentation for https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/PropertyResourceBundle.html[PropertyResourceBundle]. +Previous versions of {project_name} had support for specifying the encoding in the first line with a comment such as `# encoding: UTF-8`, which is no longer supported. + +[role="_additional-resources"] +.Additional resources +* For details on how the current locale is selected, see {developerguide_link}#_locale_selector[Locale Selector]. + +== Overriding localized text for an entire realm + +You also have the option to declare translations that take effect for an entire realm. You specify key/value pairs in the realm settings. Any key specified in this way overrides the key/value pairs deployed with a theme. + +CAUTION: In most cases, using realm overrides is not the recommended way to achieve localization in {project_name}. Consider carefully if you want every theme in your realm to use a key/value pair declared as a realm override. + +.Procedure to add a realm override + +. Log into the Admin Console. +. Select your realm. +. Click *Realm Settings* from the menu. +. Click on the *Localization* tab. +. Click on the *Realm overrides* subtab. +. Select a language from the option list. +. Click *Add translation*. +. Create a key/value pair from the modal dialog. + +Notice another subtab called *Effective message bundles*. This subtab provides a tool to query key/value pairs for a combination of theme, language, and theme type. You can use this tool to test and make sure your realm overrides took effect. 
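+
+If you automate realm overrides instead of using the Admin Console, the Admin REST API exposes the same localization data. The following is only a sketch; it assumes the realm localization endpoint, a realm named `myrealm`, and an admin access token already stored in `$TOKEN`:
+
+[source,bash]
+----
+# Create or update realm overrides for the Norwegian locale as key/value pairs
+curl -X POST "http://localhost:8080/admin/realms/myrealm/localization/no" \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"usernameOrEmail": "Brukernavn"}'
+----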
+ \ No newline at end of file diff --git a/docs/guides/ui-customization/quick-theme.adoc b/docs/guides/ui-customization/quick-theme.adoc new file mode 100644 index 000000000000..ea00fc66a96a --- /dev/null +++ b/docs/guides/ui-customization/quick-theme.adoc @@ -0,0 +1,63 @@ +<#import "/templates/guide.adoc" as tmpl> +<#import "/templates/links.adoc" as links> + +<@tmpl.guide +title="Customizing with Quick Theme" +priority=30 +preview="true" +summary="Learn how to customize the consoles and login screens with the Quick Theme utility."> + +"Quick Theme" is an experimental feature which allows you to quickly and easily create themes with new logos and colors. Often, this is all you need to facilitate branding and skinning for the Account Console, Admin Console, and login page. + +== Enabling Quick Theme + +Because the "Quick Theme" feature is experimental, you will need to enable this feature using a feature flag as you start the server. + +For example: +[source,bash] +---- +bin/kc.[sh|bat] start --features=quick-theme +---- + +== Setting logos and colors +The "Quick Theme" tool is shown below. The theme you create will extend the default {project_name} theme. Therefore, when you want to create a new theme with the tool you will start out with the default colors and images. + +image::ui-customization/quick-theme-overview.png[title="Quick Theme"] + +As shown below, when you change a color or image, you get a preview of how your colors and images will look together for various elements of your theme. The color options correspond to https://www.patternfly.org/developer-resources/global-css-variables/[PatternFly global CSS variables], which are used by {project_name} to create themes. + +image::ui-customization/simple-quick-theme-changes.png[title="Preview after changes"] + +[TIP] +==== +The "Quick Theme" tool uses the default color chooser from your browser. Some color choosers, such as in Chrome, have a handy tool to find a color from something on your screen. Notice the "dropper" icon in the lefthand side of the dialog. + +image::ui-customization/color-chooser.png[alt="Color chooser with dropper", title="Color chooser with the dropper tool"] + +This "dropper" tool can be used to select your background color to match a color in your logo. + +.Find a color with the dropper tool +image::ui-customization/easy-theme-find-color.png[alt="Find a color with the dropper tool", title="Find a color with the dropper tool"] +==== + +When your new theme is complete, click *Download theme JAR* to download a theme JAR that is ready for deployment. + +=== Deploying your theme +Once you have downloaded your theme's archive jar, it's time to deploy and try it out. To deploy the archive to {project_name}, add it to the `providers/` directory of {project_name} and restart the server if it is already running. + +CAUTION: For security reasons, never deploy a theme unless you are confident of its origin. An image can be a possible attack vector. This issue is the reason that {project_name} does not allow automatic deployment of a theme through the Admin Console. Only those administrators with file access to your production server should deploy a theme in production. + +Another deployment option is to "unjar" the archive into the `themes/` directory. With this option, you explore the theme and make manual changes. + +Many compression utilities or "zip tools" support `jar` files and you can use these to "unjar" the file. If no such utility is available, you can use the `jar` utility that comes with the java JDK. 
+
+[source,bash]
+----
+jar xf quick-theme.jar
+----
+
+=== Trying out your new theme
+Your theme includes the images and colors you provided. They appear throughout the Account Console, Admin Console, and login page.
+
+Once your "Quick Theme" archive jar is deployed, you can fully test it using the procedures shown in the <>. Essentially, you just need to choose your new theme on the Realm settings --> Themes tab.
+
 diff --git a/docs/guides/ui-customization/themes-react.adoc b/docs/guides/ui-customization/themes-react.adoc
new file mode 100644
index 000000000000..c403e42dea5f
--- /dev/null
+++ b/docs/guides/ui-customization/themes-react.adoc
@@ -0,0 +1,69 @@
+<#import "/templates/guide.adoc" as tmpl>
+<#import "/templates/links.adoc" as links>
+
+<@tmpl.guide
+title="Using the npm UI packages"
+priority=80
+summary="Learn how to use UI modules in your own application.">
+
+A final approach to customization is to take pieces of the Admin Console or Account Console and use them in your own React application.
+
+To fully customize these consoles, you can use the aforementioned React-based npm packages.
+Two packages exist:
+
+* `@keycloak/keycloak-admin-ui`: This is the base theme for the Admin Console.
+* `@keycloak/keycloak-account-ui`: This is the base theme for the Account Console.
+Both packages are available in the public npm repository.
+
+== Installing the packages
+
+To install the packages, run the following command:
+
+[source,bash]
+----
+pnpm install @keycloak/keycloak-account-ui
+----
+
+== Using the packages
+
+To use these pages, you add the `KeycloakProvider` component to your component hierarchy to choose the client, realm, and URL that you need.
+
+[source,javascript]
+----
+import { KeycloakProvider } from "@keycloak/keycloak-ui-shared";
+
+//...
+
+<KeycloakProvider environment={environment}>
+  {/* rest of your application */}
+</KeycloakProvider>
+----
+
+== Translating the pages
+
+The pages are translated using the `i18next` library.
+You can set it up as described on the https://react.i18next.com/[react-i18next website].
+If you want to use the translations that are provided, add `i18next-http-backend` to your project and add the following:
+
+[source,javascript]
+----
+backend: {
+  loadPath: `http://localhost:8080/resources/master/account/{{lng}}`,
+  parse: (data: string) => {
+    const messages = JSON.parse(data);
+
+    const result: Record<string, string> = {};
+    messages.forEach((v) => (result[v.key] = v.value)); // need to convert to a record
+    return result;
+  },
+},
+----
+
+== Using the pages
+
+To see how to further integrate the pages, we recommend that you take a look at the output of the tool in the <> chapter.
+
 \ No newline at end of file
diff --git a/docs/guides/ui-customization/themes.adoc b/docs/guides/ui-customization/themes.adoc
new file mode 100644
index 000000000000..a7cf860db926
--- /dev/null
+++ b/docs/guides/ui-customization/themes.adoc
@@ -0,0 +1,417 @@
+<#import "/templates/guide.adoc" as tmpl>
+<#import "/templates/links.adoc" as links>
+
+<@tmpl.guide
+title="Working with themes"
+priority=20
+summary="Understand how to create and configure themes.">
+
+{project_name} provides theme support for web pages and emails. This allows customizing the look and feel of end-user facing pages so they can be
+integrated with your applications.
+
+image::ui-customization/login-sunrise.png[caption="",title="Login page with sunrise example theme"]
+
+== Theme types
+
+A theme can provide one or more types to customize different aspects of {project_name}.
The types available are: + +* Account - Account Console +* Admin - Admin Console +* Email - Emails +* Login - Login forms +* Welcome - Welcome page + +== Configuring a theme + +All theme types, except welcome, are configured through the Admin Console. + +.Procedure + +. Log into the Admin Console. +. Select your realm from the drop-down box in the top left corner. +. Click *Realm Settings* from the menu. +. Click the *Themes* tab. ++ +NOTE: To set the theme for the `master` Admin Console you need to set the Admin Console theme for the `master` realm. ++ +. To see the changes to the Admin Console refresh the page. + +. Change the welcome theme by using the `spi-theme--welcome-theme` option. + +. For example: ++ +[source,bash] +---- +bin/kc.[sh|bat] start --spi-theme--welcome-theme=custom-theme +---- + +[[_default-themes]] +== Default themes + +{project_name} comes bundled with default themes in the JAR file `keycloak-themes-{project_versionMvn}.jar` inside the server distribution. +The server's root `themes` directory does not contain any themes by default, but it contains a README file with some additional details about the default themes. +To simplify upgrading, do not edit the bundled themes directly. Instead create your own theme that extends one of the bundled themes. + +[[_creating-a-theme]] +== Creating a theme + +A theme consists of: + +* HTML templates (https://freemarker.apache.org/[Freemarker Templates]) +* Images +* Message bundles +* Stylesheets +* Scripts +* Theme properties + +Unless you plan to replace every single page you should extend another theme. Most likely you will want to extend some existing theme. Alternatively, if you intend to provide your own implementation of the admin or account console, +consider extending the `base` theme. The `base` theme consists of a message bundle and therefore such implementation needs to start from scratch, including implementation of the main `index.ftl` Freemarker template, but it can leverage existing translations from the message bundle. + +When extending a theme you can override individual resources (templates, stylesheets, etc.). If you decide to override HTML templates bear in mind that you may +need to update your custom template when upgrading to a new release. + +While creating a theme it's a good idea to disable caching as this makes it possible to edit theme resources directly from the `themes` directory without +restarting {project_name}. + +.Procedure + +. Run Keycloak with the following options: ++ +[source,bash] +---- +bin/kc.[sh|bat] start --spi-theme--static-max-age=-1 --spi-theme--cache-themes=false --spi-theme--cache-templates=false +---- + +. Create a directory in the `themes` directory. ++ +The name of the directory becomes the name of the theme. For example to +create a theme called `mytheme` create the directory `themes/mytheme`. + +. Inside the theme directory, create a directory for each of the types your theme is going to provide. ++ +For example, to add the login type to the `mytheme` theme, create the directory `themes/mytheme/login`. + +. For each type create a file `theme.properties` which allows setting some configuration for the theme. ++ +For example, to configure the theme `themes/mytheme/login` to extend the `base` theme and import some common resources, create the file `themes/mytheme/login/theme.properties` with following contents: ++ +[source] +---- +parent=base +import=common/keycloak +---- ++ +You have now created a theme with support for the login type. + +. 
Log into the Admin Console to check out your new theme +. Select your realm +. Click *Realm Settings* from the menu. +. Click on the *Themes* tab. +. For *Login Theme* select *mytheme* and click *Save*. +. Open the login page for the realm. ++ +You can do this either by logging in through your application or by opening the Account Console (`/realms/{realm-name}/account`). + +. To see the effect of changing the parent theme, set `parent=keycloak` in `theme.properties` and refresh the login page. + +[NOTE] +==== +Be sure to re-enable caching in production as it will significantly impact performance. +==== +[NOTE] +==== +If you want to manually delete the content of the themes cache, you can do so by deleting the `data/tmp/kc-gzip-cache` directory of the server distribution. +It can be useful for instance if you redeployed custom providers or custom themes without disabling themes caching in the previous server executions. +==== + +=== Theme properties + +Theme properties are set in the file `/theme.properties` in the theme directory. + +* parent - Parent theme to extend +* import - Import resources from another theme +* common - Override the common resource path. The default value is `common/keycloak` when not specified. This value would be used as value of suffix of ${r"`${url.resourcesCommonPath}`"}, which is used typically in freemarker templates (prefix of ${r"`${url.resoucesCommonPath}`"} value is theme root uri). +* styles - Space-separated list of styles to include +* locales - Comma-separated list of supported locales + +There are a list of properties that can be used to change the css class used for certain element types. For a list of these properties look at the theme.properties +file in the corresponding type of the keycloak theme (`themes/keycloak//theme.properties`). + +You can also add your own custom properties and use them from custom templates. + +When doing so, you can substitute system properties or environment variables by using these formats: + +* ${r"`${some.system.property}`"} - for system properties +* ${r"`${env.ENV_VAR}`"} - for environment variables. + +A default value can also be provided in case the system property or the environment variable is not found with ${r"`${foo\:defaultValue}`"}. + +NOTE: If no default value is provided and there's no corresponding system property or environment variable, then nothing is replaced and you end up with the format in your template. + +Here's an example of what is possible: + +[source,properties] +---- +javaVersion=${r"${java.version}"} + +unixHome=${r"${env.HOME:Unix home not found}"} +windowsHome=${r"${env.HOMEPATH:Windows home not found}"} +---- + +[[_theme_stylesheet]] +=== Add a stylesheet to a theme + +You can add one or more stylesheets to a theme. + +.Procedure + +. Create a file in the `/resources/css` directory of your theme. + +. Add this file to the `styles` property in `theme.properties`. ++ +For example, to add `styles.css` to the `mytheme`, create `themes/mytheme/login/resources/css/styles.css` with the following content: ++ +[source,css] +---- +.login-pf body { + background: DimGrey none; +} +---- + +. Edit `themes/mytheme/login/theme.properties` and add: ++ +[source] +---- +styles=css/styles.css +---- + +. To see the changes, open the login page for your realm. ++ +You will notice that the only styles being applied are those from your custom stylesheet. + +. To include the styles from the parent theme, load the styles from that theme. 
Edit `themes/mytheme/login/theme.properties` and change `styles` to: ++ +[source] +---- +styles=css/login.css css/styles.css +---- ++ +NOTE: To override styles from the parent stylesheets, ensure that your stylesheet is listed last. + +=== Adding a script to a theme + +You can add one or more scripts to a theme. + +.Procedure + +. Create a file in the `/resources/js` directory of your theme. + +. Add the file to the `scripts` property in `theme.properties`. ++ +For example, to add `script.js` to the `mytheme`, create `themes/mytheme/login/resources/js/script.js` with the following content: ++ +[source,javascript] +---- +alert('Hello'); +---- ++ +Then edit `themes/mytheme/login/theme.properties` and add: ++ +[source] +---- +scripts=js/script.js +---- + +=== Adding an image to a theme + +To make images available to the theme add them to the `/resources/img` directory of your theme. These can be used from within stylesheets or +directly in HTML templates. + +For example to add an image to the `mytheme` copy an image to `themes/mytheme/login/resources/img/image.jpg`. + +You can then use this image from within a custom stylesheet with: + +[source,css] +---- +body { + background-image: url('../img/image.jpg'); + background-size: cover; +} +---- + +Or to use directly in HTML templates add the following to a custom HTML template: + +[source,html] +---- +My image description +---- + +[[_theme_custom_footer]] +=== Adding a custom footer to a login theme + +In order to use a custom footer, create a `footer.ftl` file in your custom login theme with the desired content. + +An example for a custom `footer.ftl` may look like this: + +[source] +---- +<#noparse> +<#macro content> +<#-- footer at the end of the login box --> +
    + +
    + + +---- + +=== Adding an image to an email theme + +To make images available to the theme add them to the `/email/resources/img` directory of your theme. These can be used from within directly in HTML templates. + +For example to add an image to the `mytheme` copy an image to `themes/mytheme/email/resources/img/logo.jpg`. + +To use directly in HTML templates add the following to a custom HTML template: + +[source,html] +---- +My image description +---- + +[[custom-identity-providers-icons]] +=== Adding custom Identity Providers icons + +{project_name} supports adding icons for custom Identity providers, which are displayed on the login screen. + +.Procedure + +. Define icon classes in your login `theme.properties` file (for example, `themes/mytheme/login/theme.properties`) with key pattern `kcLogoIdP-`. + +. For an Identity Provider with an alias `myProvider`, you may add a line to `theme.properties` file of your custom theme. For example: ++ +[source] +---- +kcLogoIdP-myProvider = fa fa-lock +---- + +All icons are available on the official website of PatternFly4. +Icons for social providers are already defined in `base` login theme properties (`themes/keycloak/login/theme.properties`), where you can inspire yourself. + +=== Creating a custom HTML template + +{project_name} uses https://freemarker.apache.org/[Apache Freemarker] templates to generate HTML and render pages. + +Although it is possible to create custom templates to change completely how pages are rendered, the recommendation is to leverage the built-in +templates as much as possible. The reasons are: + +* During upgrades, you might be forced to update your custom templates to get the latest updates from newer versions +* link:#_theme_stylesheet[Configuring CSS styles] to your themes allows you to adapt the UI to match your UI design standards and guidelines. +* link:{adminguide_link}#user-profile[User Profile] allows you to support custom user attributes and configure how they are rendered. + +In most cases, you won't need to change templates to adapt {project_name} to your needs, but you can override individual +templates in your own theme by creating `/