diff --git a/README.md b/README.md index 2486b5d..12de98d 100644 --- a/README.md +++ b/README.md @@ -78,4 +78,4 @@ and includes the full set of [free features](https://www.elastic.co/subscriptions). View the detailed release notes -[here](https://www.elastic.co/guide/en/elasticsearch/reference/8.17/es-release-notes.html). +[here](https://www.elastic.co/guide/en/elasticsearch/reference/9.1/es-release-notes.html). diff --git a/elasticsearch/Dockerfile b/elasticsearch/Dockerfile index 59e5a27..2569cab 100644 --- a/elasticsearch/Dockerfile +++ b/elasticsearch/Dockerfile @@ -1,76 +1,61 @@ -################################################################################ -# This Dockerfile was generated from the template at distribution/src/docker/Dockerfile -# -# Beginning of multi stage Dockerfile -################################################################################ + ################################################################################ # Build stage 1 `builder`: # Extract Elasticsearch artifact ################################################################################ -FROM ubuntu:20.04 AS builder - -# Install required packages to extract the Elasticsearch distribution - -RUN for iter in 1 2 3 4 5 6 7 8 9 10; do \ - apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y curl && \ - exit_code=0 && break || \ - exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ - done; \ - exit $exit_code - - # `tini` is a tiny but valid init for containers. This is used to cleanly - # control how ES and any child processes are shut down. - # For wolfi we pick it from the blessed wolfi package registry. - # - # The tini GitHub page gives instructions for verifying the binary using - # gpg, but the keyservers are slow to return the key and this can fail the - # build. Instead, we check the binary against the published checksum. 
- RUN set -eux ; \ - tini_bin="" ; \ - case "$(arch)" in \ - aarch64) tini_bin='tini-arm64' ;; \ - x86_64) tini_bin='tini-amd64' ;; \ - *) echo >&2 ; echo >&2 "Unsupported architecture $(arch)" ; echo >&2 ; exit 1 ;; \ - esac ; \ - curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v0.19.0/${tini_bin} ; \ - curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v0.19.0/${tini_bin}.sha256sum ; \ - sha256sum -c ${tini_bin}.sha256sum ; \ - rm ${tini_bin}.sha256sum ; \ - mv ${tini_bin} /bin/tini ; \ - chmod 0555 /bin/tini - -RUN mkdir /usr/share/elasticsearch -WORKDIR /usr/share/elasticsearch +FROM redhat/ubi9-minimal:latest AS builder -RUN curl --retry 10 -S -L --output /tmp/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-8.17.0-linux-$(arch).tar.gz +RUN microdnf install -y findutils tar gzip -RUN tar -zxf /tmp/elasticsearch.tar.gz --strip-components=1 +# `tini` is a tiny but valid init for containers. This is used to cleanly +# control how ES and any child processes are shut down. +# +# The tini GitHub page gives instructions for verifying the binary using +# gpg, but the keyservers are slow to return the key and this can fail the +# build. Instead, we check the binary against the published checksum. 
+RUN set -eux; \ + arch="$(rpm --query --queryformat='%{ARCH}' rpm)"; \ + case "$arch" in \ + aarch64) tini_bin='tini-arm64'; tini_sum='07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81' ;; \ + x86_64) tini_bin='tini-amd64'; tini_sum='93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c' ;; \ + *) echo >&2 "Unsupported architecture $arch"; exit 1 ;; \ + esac ; \ + curl -f --retry 10 -S -L -o /tmp/tini https://github.com/krallin/tini/releases/download/v0.19.0/${tini_bin}; \ + echo "${tini_sum} /tmp/tini" | sha256sum -c -; \ + mv /tmp/tini /bin/tini; \ + chmod 0555 /bin/tini -# The distribution includes a `config` directory, no need to create it -COPY config/elasticsearch.yml config/ -COPY config/log4j2.properties config/log4j2.docker.properties - -# 1. Configure the distribution for Docker -# 2. Create required directory -# 3. Move the distribution's default logging config aside -# 4. Move the generated docker logging config so that it is the default -# 5. Reset permissions on all directories -# 6. Reset permissions on all files -# 7. Make CLI tools executable -# 8. Make some directories writable. `bin` must be writable because -# plugins can install their own CLI utilities. -# 9. 
Make some files writable -RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elasticsearch-env && \ +WORKDIR /usr/share/elasticsearch +RUN arch="$(rpm --query --queryformat='%{ARCH}' rpm)" && curl -f --retry 10 -S -L --output /tmp/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-9.1.5-linux-$arch.tar.gz +RUN tar -zxf /tmp/elasticsearch.tar.gz --strip-components=1 && \ +# Configure the distribution for Docker + sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elasticsearch-env && \ +# Create required directory mkdir data && \ - mv config/log4j2.properties config/log4j2.file.properties && \ - mv config/log4j2.docker.properties config/log4j2.properties && \ +# Reset permissions on all directories find . -type d -exec chmod 0555 {} + && \ +# keep default elasticsearch log4j config + mv config/log4j2.properties config/log4j2.file.properties && \ +# Reset permissions on all files find . -type f -exec chmod 0444 {} + && \ +# Make CLI tools executable chmod 0555 bin/* jdk/bin/* jdk/lib/jspawnhelper modules/x-pack-ml/platform/linux-*/bin/* && \ +# Make some directories writable. `bin` must be writable because +# plugins can install their own CLI utilities. chmod 0775 bin config config/jvm.options.d data logs plugins && \ - find config -type f -exec chmod 0664 {} + +# Make some files writable + find config -type f -exec chmod 0664 {} + && \ +# Tighten up permissions on the ES home dir (the permissions of the contents are handled below) + chmod 0775 . && \ +# You can't install plugins that include configuration when running as `elasticsearch` and the `config` +# dir is owned by `root`, because the installer tries to manipulate the permissions on the plugin's +# config directory. 
+ chown 1000:1000 bin config config/jvm.options.d data logs plugins + +# The distribution includes a `config` directory, no need to create it +COPY --chmod=664 config/elasticsearch.yml config/log4j2.properties config/ ################################################################################ # Build stage 2 (the actual Elasticsearch image): @@ -79,83 +64,70 @@ RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elas # Add entrypoint ################################################################################ -FROM ubuntu:20.04 - -# Change default shell to bash, then install required packages with retries. -RUN yes no | dpkg-reconfigure dash && \ - for iter in 1 2 3 4 5 6 7 8 9 10; do \ - export DEBIAN_FRONTEND=noninteractive && \ - apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y --no-install-recommends \ - ca-certificates curl netcat p11-kit unzip zip && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* && \ - exit_code=0 && break || \ - exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ - done; \ - exit $exit_code +FROM redhat/ubi9-minimal:latest + +RUN microdnf install --setopt=tsflags=nodocs -y \ + nc shadow-utils zip unzip findutils procps-ng && \ + microdnf clean all RUN groupadd -g 1000 elasticsearch && \ - adduser --uid 1000 --gid 1000 --home /usr/share/elasticsearch elasticsearch && \ - adduser elasticsearch root && \ + adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \ chown -R 0:0 /usr/share/elasticsearch -ENV ELASTIC_CONTAINER true +ENV ELASTIC_CONTAINER=true + +COPY --from=builder /bin/tini /bin/tini WORKDIR /usr/share/elasticsearch -COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch - -COPY --from=builder --chown=0:0 /bin/tini /bin/tini - -ENV PATH /usr/share/elasticsearch/bin:$PATH -ENV SHELL /bin/bash -COPY bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh - -# 1. 
Sync the user and group permissions of /etc/passwd -# 2. Set correct permissions of the entrypoint -# 3. Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks. -# We've already run this in previous layers so it ought to be a no-op. -# 4. Replace OpenJDK's built-in CA certificate keystore with the one from the OS -# vendor. The latter is superior in several ways. -# REF: https://github.com/elastic/elasticsearch-docker/issues/171 -# 5. Tighten up permissions on the ES home dir (the permissions of the contents are handled earlier) -# 6. You can't install plugins that include configuration when running as `elasticsearch` and the `config` -# dir is owned by `root`, because the installed tries to manipulate the permissions on the plugin's -# config directory. +COPY --from=builder --chown=0:0 /usr/share/elasticsearch . + +# Replace OpenJDK's built-in CA certificate keystore with the one from the OS +# vendor. The latter is superior in several ways. +# REF: https://github.com/elastic/elasticsearch-docker/issues/171 +RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts jdk/lib/security/cacerts + +ENV PATH=/usr/share/elasticsearch/bin:$PATH +ENV SHELL=/bin/bash + +COPY --chmod=0555 bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh + RUN chmod g=u /etc/passwd && \ - chmod 0555 /usr/local/bin/docker-entrypoint.sh && \ find / -xdev -perm -4000 -exec chmod ug-s {} + && \ chmod 0775 /usr/share/elasticsearch && \ chown elasticsearch bin config config/jvm.options.d data logs plugins -# Update "cacerts" bundle to use Ubuntu's CA certificates (and make sure it -# stays up-to-date with changes to Ubuntu's store) -COPY bin/docker-openjdk /etc/ca-certificates/update.d/docker-openjdk -RUN /etc/ca-certificates/update.d/docker-openjdk - EXPOSE 9200 9300 -LABEL org.label-schema.build-date="2024-12-11T12:08:05.663969764Z" \ +LABEL org.label-schema.build-date="2025-10-02T22:07:12.966975992Z" \ org.label-schema.license="Elastic-License-2.0" \ 
org.label-schema.name="Elasticsearch" \ org.label-schema.schema-version="1.0" \ org.label-schema.url="https://www.elastic.co/products/elasticsearch" \ org.label-schema.usage="https://www.elastic.co/guide/en/elasticsearch/reference/index.html" \ - org.label-schema.vcs-ref="2b6a7fed44faa321997703718f07ee0420804b41" \ + org.label-schema.vcs-ref="90ee222e7e0136dd8ddbb34015538f3a00c129b7" \ org.label-schema.vcs-url="https://github.com/elastic/elasticsearch" \ org.label-schema.vendor="Elastic" \ - org.label-schema.version="8.17.0" \ - org.opencontainers.image.created="2024-12-11T12:08:05.663969764Z" \ + org.label-schema.version="9.1.5" \ + org.opencontainers.image.created="2025-10-02T22:07:12.966975992Z" \ org.opencontainers.image.documentation="https://www.elastic.co/guide/en/elasticsearch/reference/index.html" \ org.opencontainers.image.licenses="Elastic-License-2.0" \ - org.opencontainers.image.revision="2b6a7fed44faa321997703718f07ee0420804b41" \ + org.opencontainers.image.revision="90ee222e7e0136dd8ddbb34015538f3a00c129b7" \ org.opencontainers.image.source="https://github.com/elastic/elasticsearch" \ org.opencontainers.image.title="Elasticsearch" \ org.opencontainers.image.url="https://www.elastic.co/products/elasticsearch" \ org.opencontainers.image.vendor="Elastic" \ - org.opencontainers.image.version="8.17.0" + org.opencontainers.image.version="9.1.5" + +LABEL name="Elasticsearch" \ + maintainer="infra@elastic.co" \ + vendor="Elastic" \ + version="9.1.5" \ + release="1" \ + summary="Elasticsearch" \ + description="You know, for search." + +RUN mkdir /licenses && ln LICENSE.txt /licenses/LICENSE # Our actual entrypoint is `tini`, a minimal but functional init program. It # calls the entrypoint we provide, while correctly forwarding signals. 
diff --git a/elasticsearch/bin/docker-entrypoint.sh b/elasticsearch/bin/docker-entrypoint.sh old mode 100755 new mode 100644 diff --git a/elasticsearch/bin/docker-openjdk b/elasticsearch/bin/docker-openjdk old mode 100755 new mode 100644 diff --git a/elasticsearch/config/log4j2.properties b/elasticsearch/config/log4j2.properties index c0d67c8..9b6435e 100644 --- a/elasticsearch/config/log4j2.properties +++ b/elasticsearch/config/log4j2.properties @@ -18,7 +18,7 @@ appender.deprecation_rolling.type = Console appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.layout.type = ECSJsonLayout # Intentionally follows a different pattern to above -appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch +appender.deprecation_rolling.layout.dataset = elasticsearch.deprecation appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter appender.header_warning.type = HeaderWarningAppender @@ -58,6 +58,19 @@ logger.index_indexing_slowlog.level = trace logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling logger.index_indexing_slowlog.additivity = false +######## ES|QL query log JSON #################### +appender.esql_querylog_rolling.type = Console +appender.esql_querylog_rolling.name = esql_querylog_rolling +appender.esql_querylog_rolling.layout.type = ECSJsonLayout +appender.esql_querylog_rolling.layout.dataset = elasticsearch.esql_querylog + +################################################# + +logger.esql_querylog_rolling.name = esql.querylog +logger.esql_querylog_rolling.level = trace +logger.esql_querylog_rolling.appenderRef.esql_querylog_rolling.ref = esql_querylog_rolling +logger.esql_querylog_rolling.additivity = false + logger.org_apache_pdfbox.name = org.apache.pdfbox logger.org_apache_pdfbox.level = off @@ -70,6 +83,12 @@ logger.org_apache_fontbox.level = off logger.org_apache_xmlbeans.name = org.apache.xmlbeans logger.org_apache_xmlbeans.level = off 
+logger.entitlements_ingest_attachment.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.ingest-attachment.ALL-UNNAMED +logger.entitlements_ingest_attachment.level = error + +logger.entitlements_repository_gcs.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.repository-gcs.ALL-UNNAMED +logger.entitlements_repository_gcs.level = error + logger.com_amazonaws.name = com.amazonaws logger.com_amazonaws.level = warn @@ -85,6 +104,9 @@ logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error +logger.entitlements_repository_s3.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.repository-s3.ALL-UNNAMED +logger.entitlements_repository_s3.level = error + appender.audit_rolling.type = Console appender.audit_rolling.name = audit_rolling appender.audit_rolling.layout.type = PatternLayout @@ -190,4 +212,10 @@ logger.xmlsig.level = error logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter logger.samlxml_decrypt.level = fatal logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter -logger.saml2_decrypt.level = fatal \ No newline at end of file +logger.saml2_decrypt.level = fatal + +logger.entitlements_xpack_security.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.x-pack-security.org.elasticsearch.security +logger.entitlements_xpack_security.level = error + +logger.entitlements_inference.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.x-pack-inference.software.amazon.awssdk.profiles +logger.entitlements_inference.level = error diff --git a/kibana/Dockerfile b/kibana/Dockerfile index 55b4377..f76f372 100644 --- a/kibana/Dockerfile +++ b/kibana/Dockerfile @@ -9,14 +9,15 @@ # Build stage 0 `builder`: # Extract Kibana artifact 
################################################################################ -FROM ubuntu:20.04 AS builder +FROM redhat/ubi9-minimal:latest AS builder -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y curl +RUN microdnf install -y findutils tar gzip RUN cd /tmp && \ - curl --retry 8 -s -L \ + arch="$(rpm --query --queryformat='%{ARCH}' rpm)" && \ + curl -f --retry 8 -s -L \ --output kibana.tar.gz \ - https://artifacts.elastic.co/downloads/kibana/kibana-8.17.0-linux-$(arch).tar.gz && \ + https://artifacts.elastic.co/downloads/kibana/kibana-9.1.5-linux-${arch}.tar.gz && \ cd - RUN mkdir /usr/share/kibana @@ -34,20 +35,21 @@ RUN chmod -R g=u /usr/share/kibana # Add an init process, check the checksum to make sure it's a match RUN set -e ; \ TINI_BIN="" ; \ - case "$(arch)" in \ + arch="$(rpm --query --queryformat='%{ARCH}' rpm)"; \ + case "$arch" in \ aarch64) \ TINI_BIN='tini-arm64' ; \ + TINI_CHECKSUM='07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81' ; \ ;; \ x86_64) \ TINI_BIN='tini-amd64' ; \ + TINI_CHECKSUM='93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c' ; \ ;; \ - *) echo >&2 "Unsupported architecture $(arch)" ; exit 1 ;; \ + *) echo >&2 "Unsupported architecture $arch" ; exit 1 ;; \ esac ; \ TINI_VERSION='v0.19.0' ; \ - curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ - curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}.sha256sum" ; \ - sha256sum -c "${TINI_BIN}.sha256sum" ; \ - rm "${TINI_BIN}.sha256sum" ; \ + curl -f --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ + echo "${TINI_CHECKSUM} ${TINI_BIN}" | sha256sum -c - ; \ mv "${TINI_BIN}" /bin/tini ; \ chmod +x /bin/tini RUN mkdir -p /usr/share/fonts/local && \ @@ -61,20 +63,12 @@ RUN mkdir -p /usr/share/fonts/local && \ # Copy kibana from stage 0 # Add entrypoint 
################################################################################ -FROM ubuntu:20.04 +FROM redhat/ubi9-minimal:latest EXPOSE 5601 -RUN for iter in {1..10}; do \ - export DEBIAN_FRONTEND=noninteractive && \ - apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y --no-install-recommends \ - fontconfig libnss3 curl ca-certificates && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* && exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && \ - sleep 10; \ - done; \ - (exit $exit_code) +RUN microdnf install --setopt=tsflags=nodocs -y \ + fontconfig liberation-fonts-common freetype shadow-utils nss findutils && \ + microdnf clean all # Bring in Kibana from the initial stage. COPY --from=builder --chown=1000:0 /usr/share/kibana /usr/share/kibana @@ -83,9 +77,10 @@ COPY --from=builder --chown=0:0 /bin/tini /bin/tini COPY --from=builder --chown=0:0 /usr/share/fonts/local/NotoSansCJK-Regular.ttc /usr/share/fonts/local/NotoSansCJK-Regular.ttc RUN fc-cache -v WORKDIR /usr/share/kibana + RUN ln -s /usr/share/kibana /opt/kibana -ENV ELASTIC_CONTAINER true +ENV ELASTIC_CONTAINER=true ENV PATH=/usr/share/kibana/bin:$PATH # Set some Kibana configuration defaults. 
@@ -108,26 +103,35 @@ RUN groupadd --gid 1000 kibana && \ --home-dir /usr/share/kibana --no-create-home \ kibana -LABEL org.label-schema.build-date="2024-12-11T11:12:31.173Z" \ +LABEL org.label-schema.build-date="2025-10-02T12:43:32.321Z" \ org.label-schema.license="Elastic License" \ org.label-schema.name="Kibana" \ org.label-schema.schema-version="1.0" \ org.label-schema.url="https://www.elastic.co/products/kibana" \ org.label-schema.usage="https://www.elastic.co/guide/en/kibana/reference/index.html" \ - org.label-schema.vcs-ref="86cbc85e621f4f3f701ed230f4e859ac5a80145b" \ + org.label-schema.vcs-ref="4a62c99c68a5156b84e1bf986d47e0a317591820" \ org.label-schema.vcs-url="https://github.com/elastic/kibana" \ org.label-schema.vendor="Elastic" \ - org.label-schema.version="8.17.0" \ - org.opencontainers.image.created="2024-12-11T11:12:31.173Z" \ + org.label-schema.version="9.1.5" \ + org.opencontainers.image.created="2025-10-02T12:43:32.321Z" \ org.opencontainers.image.documentation="https://www.elastic.co/guide/en/kibana/reference/index.html" \ org.opencontainers.image.licenses="Elastic License" \ - org.opencontainers.image.revision="86cbc85e621f4f3f701ed230f4e859ac5a80145b" \ + org.opencontainers.image.revision="4a62c99c68a5156b84e1bf986d47e0a317591820" \ org.opencontainers.image.source="https://github.com/elastic/kibana" \ org.opencontainers.image.title="Kibana" \ org.opencontainers.image.url="https://www.elastic.co/products/kibana" \ org.opencontainers.image.vendor="Elastic" \ - org.opencontainers.image.version="8.17.0" + org.opencontainers.image.version="9.1.5" + +LABEL name="Kibana" \ + maintainer="infra@elastic.co" \ + vendor="Elastic" \ + version="9.1.5" \ + release="1" \ + summary="Kibana" \ + description="Your window into the Elastic Stack." 
+RUN mkdir /licenses && ln LICENSE.txt /licenses/LICENSE ENTRYPOINT ["/bin/tini", "--"] diff --git a/kibana/bin/kibana-docker b/kibana/bin/kibana-docker index f4ae377..cd57325 100755 --- a/kibana/bin/kibana-docker +++ b/kibana/bin/kibana-docker @@ -40,6 +40,7 @@ kibana_vars=( csp.report_uri csp.report_to csp.report_only.form_action + csp.report_only.object_src permissionsPolicy.report_to data.autocomplete.valueSuggestions.terminateAfter data.autocomplete.valueSuggestions.timeout @@ -83,9 +84,6 @@ kibana_vars=( elasticsearch.ssl.truststore.path elasticsearch.ssl.verificationMode elasticsearch.username - enterpriseSearch.accessCheckTimeout - enterpriseSearch.accessCheckTimeoutWarning - enterpriseSearch.host externalUrl.policy i18n.locale interactiveSetup.enabled @@ -165,6 +163,7 @@ kibana_vars=( server.name server.port server.protocol + server.prototypeHardening server.publicBaseUrl server.requestId.allowFromAnyIp server.requestId.ipAllowlist @@ -213,6 +212,9 @@ kibana_vars=( xpack.actions.allowedHosts xpack.actions.customHostSettings xpack.actions.email.domain_allowlist + xpack.actions.email.services.ses.host + xpack.actions.email.services.ses.port + xpack.actions.email.services.enabled xpack.actions.enableFooterInEmail xpack.actions.enabledActionTypes xpack.actions.maxResponseContentLength @@ -221,16 +223,14 @@ kibana_vars=( xpack.actions.proxyBypassHosts xpack.actions.proxyHeaders xpack.actions.proxyOnlyHosts - xpack.actions.proxyRejectUnauthorizedCertificates xpack.actions.proxyUrl - xpack.actions.rejectUnauthorized xpack.actions.responseTimeout xpack.actions.ssl.proxyVerificationMode xpack.actions.ssl.verificationMode + xpack.actions.webhook.ssl.pfx.enabled xpack.alerting.healthCheck.interval xpack.alerting.invalidateApiKeysTask.interval xpack.alerting.invalidateApiKeysTask.removalDelay - xpack.alerting.defaultRuleTaskTimeout xpack.alerting.rules.run.timeout xpack.alerting.rules.run.ruleTypeOverrides xpack.alerting.cancelAlertsOnRuleTimeout @@ -239,10 +239,9 @@ 
kibana_vars=( xpack.alerting.rules.run.actions.max xpack.alerting.rules.run.alerts.max xpack.alerting.rules.run.actions.connectorTypeOverrides - xpack.alerting.maxScheduledPerMinute - xpack.alerts.healthCheck.interval - xpack.alerts.invalidateApiKeysTask.interval - xpack.alerts.invalidateApiKeysTask.removalDelay + xpack.alerting.rules.maxScheduledPerMinute + xpack.alerting.disabledRuleTypes + xpack.alerting.enabledRuleTypes xpack.apm.indices.error xpack.apm.indices.metric xpack.apm.indices.onboarding @@ -322,7 +321,6 @@ kibana_vars=( xpack.productDocBase.artifactRepositoryUrl xpack.reporting.capture.browser.autoDownload xpack.reporting.capture.browser.chromium.disableSandbox - xpack.reporting.capture.browser.chromium.inspect xpack.reporting.capture.browser.chromium.maxScreenshotDimension xpack.reporting.capture.browser.chromium.proxy.bypass xpack.reporting.capture.browser.chromium.proxy.enabled @@ -383,6 +381,7 @@ kibana_vars=( xpack.security.audit.appender.strategy.type xpack.security.audit.appender.type xpack.security.audit.enabled + xpack.security.audit.include_saved_object_names xpack.security.audit.ignore_filters xpack.security.authc.http.autoSchemesEnabled xpack.security.authc.http.enabled @@ -415,6 +414,7 @@ kibana_vars=( xpack.securitySolution.packagerTaskInterval xpack.securitySolution.prebuiltRulesPackageVersion xpack.spaces.maxSpaces + xpack.spaces.defaultSolution xpack.task_manager.capacity xpack.task_manager.claim_strategy xpack.task_manager.auto_calculate_default_ech_capacity diff --git a/logstash/Dockerfile b/logstash/Dockerfile index 8fbacad..f58ff3d 100644 --- a/logstash/Dockerfile +++ b/logstash/Dockerfile @@ -1,95 +1,83 @@ # This Dockerfile was generated from templates/Dockerfile.erb + + +# Build env2yaml +FROM golang:1.25 AS builder-env2yaml +COPY env2yaml/env2yaml.go env2yaml/go.mod env2yaml/go.sum /tmp/go/src/env2yaml/ - -FROM ubuntu:20.04 +WORKDIR /tmp/go/src/env2yaml -RUN for iter in {1..10}; do \ - export DEBIAN_FRONTEND=noninteractive && \ 
- apt-get update -y && \ - apt-get upgrade -y && \ - apt-get install -y procps findutils tar gzip && \ - apt-get install -y locales && \ - apt-get install -y curl && \ - apt-get clean all && \ - locale-gen 'en_US.UTF-8' && \ - apt-get clean metadata && \ - exit_code=0 && break || exit_code=$? && \ -echo "packaging error: retry $iter in 10s" && \ -apt-get clean all && \ - apt-get clean metadata && \ -sleep 10; done; \ -(exit $exit_code) +RUN go build -trimpath -# Provide a non-root user to run the process. -RUN groupadd --gid 1000 logstash && \ - adduser --uid 1000 --gid 1000 --home /usr/share/logstash --no-create-home logstash +# Build main image +# Minimal distributions do not ship with en language packs. +FROM redhat/ubi9-minimal:latest + +ENV ELASTIC_CONTAINER=true +ENV PATH=/usr/share/logstash/bin:$PATH +ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 -# Add Logstash itself. -RUN curl -Lo - https://artifacts.elastic.co/downloads/logstash/logstash-8.17.0-linux-$(arch).tar.gz | \ - tar zxf - -C /usr/share && \ - mv /usr/share/logstash-8.17.0 /usr/share/logstash && \ - chown --recursive logstash:logstash /usr/share/logstash/ && \ +WORKDIR /usr/share + +# Install packages +RUN \ + microdnf install -y procps findutils tar gzip && \ + microdnf install -y openssl && \ + microdnf install -y which shadow-utils && \ + microdnf clean all + +# Provide a non-root user to run the process +# Add Logstash itself and set permissions +RUN groupadd --gid 1000 logstash && \ + adduser --uid 1000 --gid 1000 \ + --home "/usr/share/logstash" \ + --no-create-home \ + logstash && \ + arch="$(rpm --query --queryformat='%{ARCH}' rpm)" && \ + curl -f -Lo logstash.tar.gz https://artifacts.elastic.co/downloads/logstash/logstash-9.1.5-linux-${arch}.tar.gz && \ + tar -zxf logstash.tar.gz -C /usr/share && \ + rm logstash.tar.gz && \ + mv /usr/share/logstash-9.1.5 /usr/share/logstash && \ chown -R logstash:root /usr/share/logstash && \ chmod -R g=u /usr/share/logstash && \ - mkdir /licenses/ && \ + mkdir 
/licenses && \ mv /usr/share/logstash/NOTICE.TXT /licenses/NOTICE.TXT && \ mv /usr/share/logstash/LICENSE.txt /licenses/LICENSE.txt && \ find /usr/share/logstash -type d -exec chmod g+s {} \; && \ ln -s /usr/share/logstash /opt/logstash -WORKDIR /usr/share/logstash -ENV ELASTIC_CONTAINER true -ENV PATH=/usr/share/logstash/bin:$PATH - -# Provide a minimal configuration, so that simple invocations will provide -# a good experience. - COPY config/logstash-full.yml config/logstash.yml -COPY config/pipelines.yml config/log4j2.properties config/log4j2.file.properties config/ -COPY pipeline/default.conf pipeline/logstash.conf - -RUN chown --recursive logstash:root config/ pipeline/ -# Ensure Logstash gets the correct locale by default. -ENV LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 - -COPY env2yaml/env2yaml-amd64 env2yaml/env2yaml-arm64 env2yaml/ -# Copy over the appropriate env2yaml artifact -RUN env2yamlarch="$(dpkg --print-architecture)"; \ - case "${env2yamlarch}" in \ - 'x86_64'|'amd64') \ - env2yamlarch=amd64; \ - ;; \ - 'aarch64'|'arm64') \ - env2yamlarch=arm64; \ - ;; \ - *) echo >&2 "error: unsupported architecture '$env2yamlarch'"; exit 1 ;; \ - esac; \ - mkdir -p /usr/local/bin; \ - cp env2yaml/env2yaml-${env2yamlarch} /usr/local/bin/env2yaml; \ - rm -rf env2yaml -# Place the startup wrapper script. 
-COPY bin/docker-entrypoint /usr/local/bin/ - -RUN chmod 0755 /usr/local/bin/docker-entrypoint +COPY --from=builder-env2yaml /tmp/go/src/env2yaml/env2yaml /usr/local/bin/env2yaml +COPY --chown=logstash:root config/pipelines.yml config/log4j2.properties config/log4j2.file.properties /usr/share/logstash/config/ +COPY --chown=logstash:root config/logstash-full.yml /usr/share/logstash/config/logstash.yml +COPY --chown=logstash:root pipeline/default.conf /usr/share/logstash/pipeline/logstash.conf +COPY --chmod=0755 bin/docker-entrypoint /usr/local/bin/ +WORKDIR /usr/share/logstash USER 1000 EXPOSE 9600 5044 -LABEL org.label-schema.schema-version="1.0" \ - org.label-schema.vendor="Elastic" \ - org.opencontainers.image.vendor="Elastic" \ +LABEL org.label-schema.build-date=2025-09-30T18:55:09+00:00 \ + org.label-schema.license="Elastic License" \ org.label-schema.name="logstash" \ - org.opencontainers.image.title="logstash" \ - org.label-schema.version="8.17.0" \ - org.opencontainers.image.version="8.17.0" \ + org.label-schema.schema-version="1.0" \ org.label-schema.url="https://www.elastic.co/products/logstash" \ org.label-schema.vcs-url="https://github.com/elastic/logstash" \ - org.label-schema.license="Elastic License" \ - org.opencontainers.image.licenses="Elastic License" \ + org.label-schema.vendor="Elastic" \ + org.label-schema.version="9.1.5" \ + org.opencontainers.image.created=2025-09-30T18:55:09+00:00 \ org.opencontainers.image.description="Logstash is a free and open server-side data processing pipeline that ingests data from a multitude of sources, transforms it, and then sends it to your favorite 'stash.'" \ - org.label-schema.build-date=2024-12-05T00:55:38+00:00 \ - org.opencontainers.image.created=2024-12-05T00:55:38+00:00 + org.opencontainers.image.licenses="Elastic License" \ + org.opencontainers.image.title="logstash" \ + org.opencontainers.image.vendor="Elastic" \ + org.opencontainers.image.version="9.1.5" \ + description="Logstash is a free and open 
server-side data processing pipeline that ingests data from a multitude of sources, transforms it, and then sends it to your favorite 'stash.'" \ + license="Elastic License" \ + maintainer="info@elastic.co" \ + name="logstash" \ + summary="Logstash is a free and open server-side data processing pipeline that ingests data from a multitude of sources, transforms it, and then sends it to your favorite 'stash.'" \ + vendor="Elastic" ENTRYPOINT ["/usr/local/bin/docker-entrypoint"] diff --git a/logstash/config/logstash-full.yml b/logstash/config/logstash-full.yml index 58e1a35..949b467 100644 --- a/logstash/config/logstash-full.yml +++ b/logstash/config/logstash-full.yml @@ -1,2 +1,2 @@ -http.host: "0.0.0.0" +api.http.host: "0.0.0.0" xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] diff --git a/logstash/config/logstash-oss.yml b/logstash/config/logstash-oss.yml index 342d19a..979a5ca 100644 --- a/logstash/config/logstash-oss.yml +++ b/logstash/config/logstash-oss.yml @@ -1 +1 @@ -http.host: "0.0.0.0" +api.http.host: "0.0.0.0" diff --git a/logstash/env2yaml/env2yaml-amd64 b/logstash/env2yaml/env2yaml-amd64 deleted file mode 100755 index b014d39..0000000 Binary files a/logstash/env2yaml/env2yaml-amd64 and /dev/null differ diff --git a/logstash/env2yaml/env2yaml-arm64 b/logstash/env2yaml/env2yaml-arm64 deleted file mode 100755 index 61bc088..0000000 Binary files a/logstash/env2yaml/env2yaml-arm64 and /dev/null differ diff --git a/logstash/env2yaml/env2yaml.go b/logstash/env2yaml/env2yaml.go new file mode 100644 index 0000000..95fc569 --- /dev/null +++ b/logstash/env2yaml/env2yaml.go @@ -0,0 +1,198 @@ +// env2yaml +// +// Merge environment variables into logstash.yml. 
+// For example, running Docker with: +// +// docker run -e pipeline.workers=6 +// +// or +// +// docker run -e PIPELINE_WORKERS=6 +// +// will cause logstash.yml to contain the line: +// +// pipeline.workers: 6 +package main + +import ( + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + + "gopkg.in/yaml.v2" +) + +var validSettings = []string{ + "api.enabled", + "api.http.host", + "api.http.port", + "api.environment", + "node.name", + "path.data", + "pipeline.id", + "pipeline.workers", + "pipeline.output.workers", + "pipeline.batch.size", + "pipeline.batch.delay", + "pipeline.unsafe_shutdown", + "pipeline.ecs_compatibility", + "pipeline.ordered", + "pipeline.plugin_classloaders", + "pipeline.separate_logs", + "path.config", + "config.string", + "config.test_and_exit", + "config.reload.automatic", + "config.reload.interval", + "config.debug", + "config.support_escapes", + "config.field_reference.escape_style", + "queue.type", + "path.queue", + "queue.page_capacity", + "queue.max_events", + "queue.max_bytes", + "queue.checkpoint.acks", + "queue.checkpoint.writes", + "queue.checkpoint.interval", // remove it for #17155 + "queue.drain", + "dead_letter_queue.enable", + "dead_letter_queue.max_bytes", + "dead_letter_queue.flush_interval", + "dead_letter_queue.storage_policy", + "dead_letter_queue.retain.age", + "path.dead_letter_queue", + "log.level", + "log.format", + "log.format.json.fix_duplicate_message_fields", + "metric.collect", + "path.logs", + "path.plugins", + "api.auth.type", + "api.auth.basic.username", + "api.auth.basic.password", + "api.auth.basic.password_policy.mode", + "api.auth.basic.password_policy.length.minimum", + "api.auth.basic.password_policy.include.upper", + "api.auth.basic.password_policy.include.lower", + "api.auth.basic.password_policy.include.digit", + "api.auth.basic.password_policy.include.symbol", + "allow_superuser", + "monitoring.cluster_uuid", + "xpack.monitoring.allow_legacy_collection", + "xpack.monitoring.enabled", + 
"xpack.monitoring.collection.interval", + "xpack.monitoring.elasticsearch.hosts", + "xpack.monitoring.elasticsearch.username", + "xpack.monitoring.elasticsearch.password", + "xpack.monitoring.elasticsearch.proxy", + "xpack.monitoring.elasticsearch.api_key", + "xpack.monitoring.elasticsearch.cloud_auth", + "xpack.monitoring.elasticsearch.cloud_id", + "xpack.monitoring.elasticsearch.sniffing", + "xpack.monitoring.elasticsearch.ssl.certificate_authority", + "xpack.monitoring.elasticsearch.ssl.ca_trusted_fingerprint", + "xpack.monitoring.elasticsearch.ssl.verification_mode", + "xpack.monitoring.elasticsearch.ssl.truststore.path", + "xpack.monitoring.elasticsearch.ssl.truststore.password", + "xpack.monitoring.elasticsearch.ssl.keystore.path", + "xpack.monitoring.elasticsearch.ssl.keystore.password", + "xpack.monitoring.elasticsearch.ssl.certificate", + "xpack.monitoring.elasticsearch.ssl.key", + "xpack.monitoring.elasticsearch.ssl.cipher_suites", + "xpack.management.enabled", + "xpack.management.logstash.poll_interval", + "xpack.management.pipeline.id", + "xpack.management.elasticsearch.hosts", + "xpack.management.elasticsearch.username", + "xpack.management.elasticsearch.password", + "xpack.management.elasticsearch.proxy", + "xpack.management.elasticsearch.api_key", + "xpack.management.elasticsearch.cloud_auth", + "xpack.management.elasticsearch.cloud_id", + "xpack.management.elasticsearch.sniffing", + "xpack.management.elasticsearch.ssl.certificate_authority", + "xpack.management.elasticsearch.ssl.ca_trusted_fingerprint", + "xpack.management.elasticsearch.ssl.verification_mode", + "xpack.management.elasticsearch.ssl.truststore.path", + "xpack.management.elasticsearch.ssl.truststore.password", + "xpack.management.elasticsearch.ssl.keystore.path", + "xpack.management.elasticsearch.ssl.keystore.password", + "xpack.management.elasticsearch.ssl.certificate", + "xpack.management.elasticsearch.ssl.key", + "xpack.management.elasticsearch.ssl.cipher_suites", + 
"xpack.geoip.download.endpoint", + "xpack.geoip.downloader.enabled", +} + +// Given a setting name, return a downcased version with delimiters removed. +func squashSetting(setting string) string { + downcased := strings.ToLower(setting) + de_dotted := strings.Replace(downcased, ".", "", -1) + de_underscored := strings.Replace(de_dotted, "_", "", -1) + return de_underscored +} + +// Given a setting name like "pipeline.workers" or "PIPELINE_UNSAFE_SHUTDOWN", +// return the canonical setting name. eg. 'pipeline.unsafe_shutdown' +func normalizeSetting(setting string) (string, error) { + for _, validSetting := range validSettings { + if squashSetting(setting) == squashSetting(validSetting) { + return validSetting, nil + } + } + return "", errors.New("Invalid setting: " + setting) +} + +func main() { + if len(os.Args) != 2 { + log.Fatalf("usage: env2yaml FILENAME") + } + settingsFilePath := os.Args[1] + + settingsFile, err := ioutil.ReadFile(settingsFilePath) + if err != nil { + log.Fatalf("error: %v", err) + } + + // Read the original settings file into a map. + settings := make(map[string]interface{}) + err = yaml.Unmarshal(settingsFile, &settings) + if err != nil { + log.Fatalf("error: %v", err) + } + + // Merge any valid settings found in the environment. 
+ foundNewSettings := false + for _, line := range os.Environ() { + kv := strings.SplitN(line, "=", 2) + key := kv[0] + setting, err := normalizeSetting(key) + if err == nil { + foundNewSettings = true + log.Printf("Setting '%s' from environment.", setting) + // we need to keep ${KEY} in the logstash.yml to let Logstash decide using ${KEY}'s value from either keystore or environment + settings[setting] = fmt.Sprintf("${%s}", key) + } + } + + if foundNewSettings { + output, err := yaml.Marshal(&settings) + if err != nil { + log.Fatalf("error: %v", err) + } + + stat, err := os.Stat(settingsFilePath) + if err != nil { + log.Fatalf("error: %v", err) + } + + err = ioutil.WriteFile(settingsFilePath, output, stat.Mode()) + if err != nil { + log.Fatalf("error: %v", err) + } + } +} diff --git a/logstash/env2yaml/go.mod b/logstash/env2yaml/go.mod new file mode 100644 index 0000000..7600007 --- /dev/null +++ b/logstash/env2yaml/go.mod @@ -0,0 +1,5 @@ +module logstash/env2yaml + +go 1.21 + +require gopkg.in/yaml.v2 v2.4.0 diff --git a/logstash/env2yaml/go.sum b/logstash/env2yaml/go.sum new file mode 100644 index 0000000..7534661 --- /dev/null +++ b/logstash/env2yaml/go.sum @@ -0,0 +1,3 @@ +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=