diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index ee84aafe6..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,27 +0,0 @@ -version: 2 -machine: true -jobs: - build: - machine: - image: ubuntu-2204:2023.07.2 - working_directory: ~/circleci-java - steps: - - run: - name: Install OpenJDK 17 - command: | - sudo apt-get update && sudo apt-get install -y openjdk-17-jdk - sudo update-alternatives --set java /usr/lib/jvm/java-17-openjdk-amd64/bin/java - sudo update-alternatives --set javac /usr/lib/jvm/java-17-openjdk-amd64/bin/javac - java -version - - checkout - - restore_cache: - key: maven-dependencies-{{ checksum "pom.xml" }} - - run: cd prometheus-metrics-shaded-dependencies && ../mvnw clean install && cd .. - - run: ./mvnw clean install - - run: ./mvnw javadoc:jar - - save_cache: - paths: - - ~/.m2 - key: maven-dependencies-{{ checksum "pom.xml" }} -orbs: - prometheus: prometheus/prometheus@0.16.0 diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 000000000..e4dbb3a7f --- /dev/null +++ b/.codespellrc @@ -0,0 +1,5 @@ +[codespell] +# Ignore words that are valid technical terms: +# - vertx: Vert.x reactive framework +# - errorprone: Error Prone static analysis tool +ignore-words-list = vertx,errorprone diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..80219f95e --- /dev/null +++ b/.editorconfig @@ -0,0 +1,22 @@ +root = true + +[*] +max_line_length = 100 +indent_size = 2 + +[{version-rules.xml,maven-wrapper.properties,checkstyle.xml,docker-compose.yaml,docker-compose.yml,Dockerfile,example_target_info.json,mise.toml,mvnw,mvnw.cmd,generate-protobuf.sh,.gitleaksignore,prometheus.properties}] +max_line_length = 200 + +[{grafana-dashboard-*.json,.editorconfig,super-linter.env,lychee.toml,renovate.json5}] +max_line_length = 300 + +[pom.xml] +max_line_length = 120 + +[*.py] +# checked by black +indent_size = 4 +max_line_length = 120 + 
+[{.mise/tasks/build-release.sh,.github/workflows/multi-version-test.yml}] +max_line_length = 200 diff --git a/.github/config/.release-please-manifest.json b/.github/config/.release-please-manifest.json new file mode 100644 index 000000000..dd8fde779 --- /dev/null +++ b/.github/config/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "1.5.0" +} diff --git a/.github/config/lychee.toml b/.github/config/lychee.toml new file mode 100644 index 000000000..0f338a7d3 --- /dev/null +++ b/.github/config/lychee.toml @@ -0,0 +1,30 @@ +# Lychee configuration file +# See https://lychee.cli.rs/config/ + +timeout = 30 +retry_wait_time = 10 +max_retries = 2 +max_concurrency = 4 + +# Check link anchors +include_fragments = true + +base_url = "https://prometheus.github.io" +exclude_path = ["docs/themes"] + +exclude = [ + # excluding links to pull requests and issues is done for performance + "^https://github.com/prometheus/client_java/(issues|pull)/\\d+$", + + # exclude localhost URLs as they require running services + "^http://localhost", + "^https://localhost", + + '#', + 'CONTRIBUTING.md', + 'LICENSE', + 'MAINTAINERS.md', + + # exclude private GitHub settings pages + "^https://github.com/prometheus/client_java/settings/", +] diff --git a/.github/config/release-please-config.json b/.github/config/release-please-config.json new file mode 100644 index 000000000..1953ed529 --- /dev/null +++ b/.github/config/release-please-config.json @@ -0,0 +1,15 @@ +{ + "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", + "separate-pull-requests": false, + "pull-request-footer": "> [!IMPORTANT]\n> Close and reopen this PR to trigger CI checks.", + "packages": { + ".": { + "release-type": "java", + "versioning": "always-bump-patch", + "extra-files": [ + "prometheus-metrics-parent/pom.xml", + "integration-tests/it-spring-boot-smoke-test/pom.xml" + ] + } + } +} diff --git a/.github/config/super-linter.env b/.github/config/super-linter.env new file 
mode 100644 index 000000000..566f1bfb3 --- /dev/null +++ b/.github/config/super-linter.env @@ -0,0 +1,43 @@ +FILTER_REGEX_EXCLUDE=mvnw|src/main/generated/.*|docs/themes/.*|keystore.pkcs12|.*\.java|prometheus-metrics-exporter-opentelemetry-shaded/pom.xml|CODE_OF_CONDUCT.md|CLAUDE.md +IGNORE_GITIGNORED_FILES=true +JAVA_FILE_NAME=google_checks.xml +LOG_LEVEL=ERROR +# conflicts with prettier +VALIDATE_BIOME_FORMAT=false +# conflicts with prettier +VALIDATE_BIOME_LINT=false +# disable kubernetes linter - complains about resource limits, etc +VALIDATE_CHECKOV=false +VALIDATE_CSS=false +VALIDATE_CSS_PRETTIER=false +VALIDATE_DOCKERFILE_HADOLINT=false +VALIDATE_GIT_COMMITLINT=false +# done by maven +VALIDATE_GOOGLE_JAVA_FORMAT=false +# times out +VALIDATE_GO_MODULES=false +VALIDATE_HTML=false +# done by checkstyle +VALIDATE_JAVA=false +# we have a lot of duplicate code in our codebase for demo purposes +VALIDATE_JSCPD=false +VALIDATE_PYTHON_PYLINT=false +# conflicts with black +VALIDATE_PYTHON_RUFF_FORMAT=false +# excluding simpleclient-archive doesn't seem to work +VALIDATE_TRIVY=false + +FIX_ENV=true +FIX_GITHUB_ACTIONS_ZIZMOR=true +FIX_GO=true +FIX_JAVASCRIPT_PRETTIER=true +FIX_JSON=true +FIX_JSONC=true +FIX_JSONC_PRETTIER=true +FIX_JSON_PRETTIER=true +FIX_MARKDOWN=true +FIX_MARKDOWN_PRETTIER=true +FIX_PYTHON_BLACK=true +FIX_SHELL_SHFMT=true +FIX_SPELL_CODESPELL=true +FIX_YAML_PRETTIER=true diff --git a/.github/release.yml b/.github/release.yml new file mode 100644 index 000000000..fb9b4d9d4 --- /dev/null +++ b/.github/release.yml @@ -0,0 +1,17 @@ +# .github/release.yml + +changelog: + categories: + - title: 🏕 Features + labels: + - "*" + exclude: + labels: + - chore + - dependencies + - title: 🧹 Chore + labels: + - chore + - title: 👒 Dependencies + labels: + - dependencies diff --git a/.github/renovate-tracked-deps.json b/.github/renovate-tracked-deps.json new file mode 100644 index 000000000..5e0fe8a13 --- /dev/null +++ b/.github/renovate-tracked-deps.json @@ -0,0 +1,590 @@ 
+{ + ".github/renovate.json5": { + "renovate-config-presets": ["grafana/flint"] + }, + ".github/workflows/acceptance-tests.yml": { + "regex": ["mise"] + }, + ".github/workflows/build.yml": { + "regex": ["mise"] + }, + ".github/workflows/generate-protobuf.yml": { + "regex": ["mise"] + }, + ".github/workflows/github-pages.yaml": { + "regex": ["mise"] + }, + ".github/workflows/java-version-matrix-tests.yml": { + "regex": ["mise"] + }, + ".github/workflows/lint.yml": { + "regex": ["mise"] + }, + ".github/workflows/native-tests.yml": { + "regex": ["mise"] + }, + ".github/workflows/nightly-benchmarks.yml": { + "regex": ["mise"] + }, + ".github/workflows/release.yml": { + "regex": ["mise"] + }, + ".github/workflows/test-release-build.yml": { + "regex": ["mise"] + }, + ".mise/envs/native/mise.toml": { + "mise": ["java"] + }, + ".mvn/wrapper/maven-wrapper.properties": { + "maven-wrapper": ["maven"] + }, + "benchmarks/pom.xml": { + "maven": [ + "com.codahale.metrics:metrics-core", + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exposition-textformats", + "io.prometheus:simpleclient", + "org.openjdk.jmh:jmh-core", + "org.openjdk.jmh:jmh-generator-annprocess" + ] + }, + "integration-tests/it-common/pom.xml": { + "maven": [ + "io.prometheus:integration-tests", + "io.prometheus:prometheus-metrics-exposition-formats" + ] + }, + "integration-tests/it-exporter/it-exporter-duplicate-metrics-sample/pom.xml": { + "maven": [ + "io.prometheus:it-exporter", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-httpserver" + ] + }, + "integration-tests/it-exporter/it-exporter-httpserver-sample/pom.xml": { + "maven": [ + "io.prometheus:it-exporter", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-httpserver" + ] + }, + "integration-tests/it-exporter/it-exporter-no-protobuf/pom.xml": { + "maven": [ + "io.prometheus:it-exporter", + 
"io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-httpserver" + ] + }, + "integration-tests/it-exporter/it-exporter-servlet-jetty-sample/pom.xml": { + "maven": [ + "io.prometheus:it-exporter", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-servlet-jakarta", + "org.eclipse.jetty.ee10:jetty-ee10-servlet", + "org.eclipse.jetty:jetty-server" + ] + }, + "integration-tests/it-exporter/it-exporter-servlet-tomcat-sample/pom.xml": { + "maven": [ + "io.prometheus:it-exporter", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-servlet-jakarta", + "org.apache.tomcat.embed:tomcat-embed-core" + ] + }, + "integration-tests/it-exporter/it-exporter-test/pom.xml": { + "maven": ["io.prometheus:it-common", "io.prometheus:it-exporter"] + }, + "integration-tests/it-exporter/it-no-protobuf-test/pom.xml": { + "maven": ["io.prometheus:it-common", "io.prometheus:it-exporter"] + }, + "integration-tests/it-exporter/pom.xml": { + "maven": ["io.prometheus:integration-tests"] + }, + "integration-tests/it-pushgateway/pom.xml": { + "maven": [ + "com.jayway.jsonpath:json-path", + "com.squareup.okhttp:okhttp", + "io.prometheus:integration-tests", + "io.prometheus:it-common", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-pushgateway" + ] + }, + "integration-tests/it-spring-boot-smoke-test/pom.xml": { + "maven": [ + "com.diffplug.spotless:spotless-maven-plugin", + "io.prometheus:it-common", + "io.prometheus:prometheus-metrics-bom", + "org.junit:junit-bom", + "org.springframework.boot:spring-boot-starter-parent" + ] + }, + "integration-tests/pom.xml": { + "maven": [ + "commons-io:commons-io", + "io.prometheus:client_java", + "org.testcontainers:junit-jupiter" + ] + }, + "mise.toml": { + "mise": [ + "go:github.com/gohugoio/hugo", + "go:github.com/grafana/oats", + "java", + "lychee", + "node", + "npm:renovate", + "protoc" + ], + "regex": 
["ghcr.io/super-linter/super-linter", "grafana/flint"] + }, + "mvnw": { + "maven-wrapper": ["maven-wrapper"] + }, + "mvnw.cmd": { + "maven-wrapper": ["maven-wrapper"] + }, + "pom.xml": { + "maven": [ + "com.google.code.findbugs:jsr305", + "com.google.errorprone:error_prone_core", + "com.google.guava:guava", + "com.google.protobuf:protobuf-java", + "com.uber.nullaway:nullaway", + "io.opentelemetry.instrumentation:opentelemetry-instrumentation-bom-alpha", + "io.opentelemetry:opentelemetry-proto", + "io.prometheus:client_java_parent", + "org.apache.felix:maven-bundle-plugin", + "org.apache.maven.plugins:maven-checkstyle-plugin", + "org.apache.maven.plugins:maven-clean-plugin", + "org.apache.maven.plugins:maven-compiler-plugin", + "org.apache.maven.plugins:maven-dependency-plugin", + "org.apache.maven.plugins:maven-deploy-plugin", + "org.apache.maven.plugins:maven-enforcer-plugin", + "org.apache.maven.plugins:maven-failsafe-plugin", + "org.apache.maven.plugins:maven-install-plugin", + "org.apache.maven.plugins:maven-jar-plugin", + "org.apache.maven.plugins:maven-javadoc-plugin", + "org.apache.maven.plugins:maven-resources-plugin", + "org.apache.maven.plugins:maven-shade-plugin", + "org.apache.maven.plugins:maven-site-plugin", + "org.apache.maven.plugins:maven-surefire-plugin", + "org.assertj:assertj-core", + "org.awaitility:awaitility", + "org.codehaus.mojo:build-helper-maven-plugin", + "org.codehaus.mojo:exec-maven-plugin", + "org.codehaus.mojo:versions-maven-plugin", + "org.jacoco:jacoco-maven-plugin", + "org.junit-pioneer:junit-pioneer", + "org.junit.jupiter:junit-jupiter", + "org.junit.jupiter:junit-jupiter-params", + "org.junit:junit-bom", + "org.mockito:mockito-core", + "org.slf4j:slf4j-simple", + "org.wiremock:wiremock" + ] + }, + "prometheus-metrics-bom/pom.xml": { + "maven": [ + "io.prometheus:client_java_parent", + "io.prometheus:prometheus-metrics-config", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-common", + 
"io.prometheus:prometheus-metrics-exporter-httpserver", + "io.prometheus:prometheus-metrics-exporter-opentelemetry", + "io.prometheus:prometheus-metrics-exporter-opentelemetry-no-otel", + "io.prometheus:prometheus-metrics-exporter-opentelemetry-otel-agent-resources", + "io.prometheus:prometheus-metrics-exporter-pushgateway", + "io.prometheus:prometheus-metrics-exporter-servlet-jakarta", + "io.prometheus:prometheus-metrics-exporter-servlet-javax", + "io.prometheus:prometheus-metrics-exposition-formats", + "io.prometheus:prometheus-metrics-exposition-formats-no-protobuf", + "io.prometheus:prometheus-metrics-exposition-textformats", + "io.prometheus:prometheus-metrics-instrumentation-caffeine", + "io.prometheus:prometheus-metrics-instrumentation-dropwizard", + "io.prometheus:prometheus-metrics-instrumentation-dropwizard5", + "io.prometheus:prometheus-metrics-instrumentation-guava", + "io.prometheus:prometheus-metrics-instrumentation-jvm", + "io.prometheus:prometheus-metrics-model", + "io.prometheus:prometheus-metrics-otel-support", + "io.prometheus:prometheus-metrics-simpleclient-bridge", + "io.prometheus:prometheus-metrics-tracer", + "io.prometheus:prometheus-metrics-tracer-common", + "io.prometheus:prometheus-metrics-tracer-initializer", + "io.prometheus:prometheus-metrics-tracer-otel", + "io.prometheus:prometheus-metrics-tracer-otel-agent" + ] + }, + "prometheus-metrics-config/pom.xml": { + "maven": ["io.prometheus:client_java"] + }, + "prometheus-metrics-core/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-config", + "io.prometheus:prometheus-metrics-exposition-formats-no-protobuf", + "io.prometheus:prometheus-metrics-model", + "io.prometheus:prometheus-metrics-tracer-initializer", + "org.apache.commons:commons-math3" + ] + }, + "prometheus-metrics-exporter-common/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exposition-formats", + 
"io.prometheus:prometheus-metrics-exposition-textformats", + "io.prometheus:prometheus-metrics-model" + ] + }, + "prometheus-metrics-exporter-httpserver/pom.xml": { + "maven": ["io.prometheus:client_java", "io.prometheus:prometheus-metrics-exporter-common"] + }, + "prometheus-metrics-exporter-opentelemetry-otel-agent-resources/pom.xml": { + "maven": [ + "io.opentelemetry:opentelemetry-api", + "io.opentelemetry:opentelemetry-context", + "io.prometheus:client_java" + ] + }, + "prometheus-metrics-exporter-opentelemetry-shaded/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-opentelemetry-otel-agent-resources" + ] + }, + "prometheus-metrics-exporter-opentelemetry/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-opentelemetry-otel-agent-resources" + ] + }, + "prometheus-metrics-exporter-pushgateway/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-common", + "org.mock-server:mockserver-netty-no-dependencies" + ] + }, + "prometheus-metrics-exporter-servlet-jakarta/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-common", + "jakarta.servlet:jakarta.servlet-api" + ] + }, + "prometheus-metrics-exporter-servlet-javax/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-common", + "javax.servlet:javax.servlet-api" + ] + }, + "prometheus-metrics-exposition-formats-shaded/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-exposition-textformats" + ] + }, + "prometheus-metrics-exposition-formats/pom.xml": { + "maven": [ + "io.prometheus:client_java", + 
"io.prometheus:prometheus-metrics-exposition-textformats" + ] + }, + "prometheus-metrics-exposition-textformats/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-config", + "io.prometheus:prometheus-metrics-model" + ] + }, + "prometheus-metrics-instrumentation-caffeine/pom.xml": { + "maven": [ + "com.github.ben-manes.caffeine:caffeine", + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exposition-textformats" + ] + }, + "prometheus-metrics-instrumentation-dropwizard/pom.xml": { + "maven": [ + "io.dropwizard.metrics:metrics-core", + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-httpserver", + "io.prometheus:prometheus-metrics-exposition-textformats", + "io.prometheus:prometheus-metrics-instrumentation-dropwizard5" + ] + }, + "prometheus-metrics-instrumentation-dropwizard5/pom.xml": { + "maven": [ + "io.dropwizard.metrics5:metrics-core", + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-httpserver", + "io.prometheus:prometheus-metrics-exposition-textformats" + ] + }, + "prometheus-metrics-instrumentation-guava/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exposition-textformats" + ] + }, + "prometheus-metrics-instrumentation-jvm/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-core", + "io.prometheus:prometheus-metrics-exporter-httpserver", + "io.prometheus:prometheus-metrics-exposition-textformats" + ] + }, + "prometheus-metrics-model/pom.xml": { + "maven": ["io.prometheus:client_java", "io.prometheus:prometheus-metrics-config"] + }, + "prometheus-metrics-otel-support/pom.xml": { + "maven": ["io.prometheus:client_java"] + }, + "prometheus-metrics-parent/pom.xml": { + "maven": [ + 
"com.diffplug.spotless:spotless-maven-plugin", + "org.apache.maven.plugins:maven-gpg-plugin", + "org.apache.maven.plugins:maven-source-plugin", + "org.sonatype.central:central-publishing-maven-plugin" + ] + }, + "prometheus-metrics-simpleclient-bridge/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:prometheus-metrics-config", + "io.prometheus:prometheus-metrics-exposition-textformats", + "io.prometheus:prometheus-metrics-model", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common" + ] + }, + "prometheus-metrics-tracer/pom.xml": { + "maven": ["io.prometheus:client_java"] + }, + "prometheus-metrics-tracer/prometheus-metrics-tracer-common/pom.xml": { + "maven": ["io.prometheus:prometheus-metrics-tracer"] + }, + "prometheus-metrics-tracer/prometheus-metrics-tracer-initializer/pom.xml": { + "maven": [ + "io.prometheus:prometheus-metrics-tracer", + "io.prometheus:prometheus-metrics-tracer-common", + "io.prometheus:prometheus-metrics-tracer-otel", + "io.prometheus:prometheus-metrics-tracer-otel-agent" + ] + }, + "prometheus-metrics-tracer/prometheus-metrics-tracer-otel-agent/pom.xml": { + "maven": [ + "io.prometheus:prometheus-metrics-tracer", + "io.prometheus:prometheus-metrics-tracer-common" + ] + }, + "prometheus-metrics-tracer/prometheus-metrics-tracer-otel/pom.xml": { + "maven": [ + "io.prometheus:prometheus-metrics-tracer", + "io.prometheus:prometheus-metrics-tracer-common" + ] + }, + "simpleclient-archive/integration_tests/it_common/pom.xml": { + "maven": ["io.prometheus:integration_tests"] + }, + "simpleclient-archive/integration_tests/it_exemplars_otel_agent/pom.xml": { + "maven": [ + "ch.qos.logback:logback-classic", + "io.prometheus:integration_tests", + "io.prometheus:it_common", + "io.prometheus:simpleclient_bom", + "io.prometheus:simpleclient_hotspot", + "io.prometheus:simpleclient_servlet", + "org.springframework.boot:spring-boot-dependencies", + "org.springframework.boot:spring-boot-maven-plugin" + ] + }, + 
"simpleclient-archive/integration_tests/it_exemplars_otel_sdk/pom.xml": { + "maven": [ + "io.opentelemetry:opentelemetry-api", + "io.opentelemetry:opentelemetry-sdk", + "io.prometheus:integration_tests", + "io.prometheus:it_common", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_httpserver" + ] + }, + "simpleclient-archive/integration_tests/it_java_versions/pom.xml": { + "maven": [ + "io.prometheus:integration_tests", + "io.prometheus:it_common", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_hotspot", + "io.prometheus:simpleclient_httpserver" + ] + }, + "simpleclient-archive/integration_tests/it_log4j2/pom.xml": { + "maven": [ + "io.prometheus:integration_tests", + "io.prometheus:it_common", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_httpserver", + "io.prometheus:simpleclient_log4j2", + "org.apache.logging.log4j:log4j-api", + "org.apache.logging.log4j:log4j-core" + ] + }, + "simpleclient-archive/integration_tests/it_pushgateway/pom.xml": { + "maven": [ + "ch.qos.logback:logback-classic", + "com.squareup.okhttp3:okhttp", + "io.prometheus:integration_tests", + "io.prometheus:it_common", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_pushgateway" + ] + }, + "simpleclient-archive/integration_tests/it_servlet_jakarta_exporter_webxml/pom.xml": { + "maven": [ + "ch.qos.logback:logback-classic", + "com.squareup.okhttp3:okhttp", + "io.prometheus:integration_tests", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_hotspot", + "io.prometheus:simpleclient_servlet_jakarta", + "jakarta.servlet:jakarta.servlet-api", + "org.apache.maven.plugins:maven-war-plugin" + ] + }, + "simpleclient-archive/integration_tests/pom.xml": { + "maven": [ + "ch.qos.logback:logback-classic", + "com.squareup.okhttp3:okhttp", + "io.prometheus:client_java", + "org.testcontainers:testcontainers" + ] + }, + "simpleclient-archive/simpleclient_graphite_bridge/pom.xml": { + "maven": ["io.prometheus:client_java", 
"io.prometheus:simpleclient"] + }, + "simpleclient-archive/simpleclient_hibernate/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "org.hibernate:hibernate-core" + ] + }, + "simpleclient-archive/simpleclient_httpserver/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common", + "javax.xml.bind:jaxb-api" + ] + }, + "simpleclient-archive/simpleclient_jetty/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "org.eclipse.jetty:jetty-server", + "org.eclipse.jetty:jetty-servlet", + "org.hamcrest:hamcrest-all" + ] + }, + "simpleclient-archive/simpleclient_jetty_jdk8/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "org.eclipse.jetty:jetty-server", + "org.eclipse.jetty:jetty-servlet", + "org.hamcrest:hamcrest-all" + ] + }, + "simpleclient-archive/simpleclient_log4j/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-core" + ] + }, + "simpleclient-archive/simpleclient_log4j2/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "org.apache.logging.log4j:log4j-core" + ] + }, + "simpleclient-archive/simpleclient_logback/pom.xml": { + "maven": [ + "ch.qos.logback:logback-classic", + "io.prometheus:client_java", + "io.prometheus:simpleclient" + ] + }, + "simpleclient-archive/simpleclient_servlet/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common", + "io.prometheus:simpleclient_servlet_common", + "javax.servlet:javax.servlet-api", + "org.eclipse.jetty:jetty-servlet" + ] + }, + "simpleclient-archive/simpleclient_servlet_common/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common" + ] + }, + 
"simpleclient-archive/simpleclient_servlet_jakarta/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common", + "io.prometheus:simpleclient_servlet_common", + "jakarta.servlet:jakarta.servlet-api", + "org.eclipse.jetty:jetty-servlet" + ] + }, + "simpleclient-archive/simpleclient_spring_web/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common", + "org.apache.commons:commons-lang3", + "org.aspectj:aspectjweaver", + "org.springframework:spring-aop", + "org.springframework:spring-context", + "org.springframework:spring-test", + "org.springframework:spring-web" + ] + }, + "simpleclient-archive/simpleclient_vertx/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common", + "io.vertx:vertx-web" + ] + }, + "simpleclient-archive/simpleclient_vertx4/pom.xml": { + "maven": [ + "io.prometheus:client_java", + "io.prometheus:simpleclient", + "io.prometheus:simpleclient_common", + "io.vertx:vertx-web" + ] + } +} diff --git a/.github/renovate.json5 b/.github/renovate.json5 new file mode 100644 index 000000000..2b6af73f7 --- /dev/null +++ b/.github/renovate.json5 @@ -0,0 +1,43 @@ +{ + $schema: "https://docs.renovatebot.com/renovate-schema.json", + extends: ["config:best-practices", "config:recommended", "github>grafana/flint"], + platformCommit: "enabled", + automerge: true, + ignorePaths: [ + "**/simpleclient-archive/**", + // old projects + // agent resources packages an OTel API that is the minimum required API version + "**/prometheus-metrics-exporter-opentelemetry-otel-agent-resources/pom.xml", + ], + labels: ["dependencies"], + packageRules: [ + { + matchFileNames: ["mise.toml"], + matchDepNames: ["java"], + groupName: "java temurin", + additionalBranchPrefix: "temurin-", + }, + { + matchFileNames: [".mise/envs/native/mise.toml"], + matchDepNames: ["java"], + groupName: 
"java graalvm", + additionalBranchPrefix: "graalvm-", + }, + { + matchPackageNames: ["io.opentelemetry.instrumentation:opentelemetry-instrumentation-bom-alpha"], + ignoreUnstable: false, + }, + { + enabled: false, + description: "Ignore internal project modules", + matchPackageNames: ["/^io\\.prometheus:(examples|example-.+|integration-tests|it-.+)$/"], + }, + { + description: "Group protobuf-java and protoc together so generated code can be updated in one PR", + matchDepNames: ["com.google.protobuf:protobuf-java", "protoc"], + groupName: "protobuf", + separateMajorMinor: false, + }, + ], + customManagers: [], +} diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml new file mode 100644 index 000000000..5884b998d --- /dev/null +++ b/.github/workflows/acceptance-tests.yml @@ -0,0 +1,21 @@ +--- +name: OpenTelemetry Acceptance Tests + +on: [pull_request] + +permissions: {} + +jobs: + acceptance-tests: + runs-on: ubuntu-24.04 + steps: + - name: Check out + with: + persist-credentials: false + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + - uses: jdx/mise-action@5228313ee0372e111a38da051671ca30fc5a96db # v3.6.3 + with: + version: v2026.2.24 + sha256: c6e8b1abbd02d4beb7f38c98174e647b4ae40e89422465bc3b49e48c0bdf9ba9 + - name: Run acceptance tests + run: mise run acceptance-test diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..2691884e3 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,27 @@ +--- +name: Build + +on: [pull_request] + +permissions: {} + +jobs: + build: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false + - uses: jdx/mise-action@5228313ee0372e111a38da051671ca30fc5a96db # v3.6.3 + with: + version: v2026.2.24 + sha256: c6e8b1abbd02d4beb7f38c98174e647b4ae40e89422465bc3b49e48c0bdf9ba9 + - name: Cache local Maven repository + uses: 
actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + - name: Run the Maven verify phase + run: mise run ci diff --git a/.github/workflows/generate-protobuf.yml b/.github/workflows/generate-protobuf.yml new file mode 100644 index 000000000..3c24ddad3 --- /dev/null +++ b/.github/workflows/generate-protobuf.yml @@ -0,0 +1,64 @@ +--- +name: Generate Protobuf + +on: + push: + branches: + - "renovate/protobuf" + +permissions: {} + +jobs: + generate: + runs-on: ubuntu-24.04 + permissions: + contents: write + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + ref: ${{ github.ref }} + # zizmor: ignore[artipacked] -- needs credentials to push + persist-credentials: true + - uses: jdx/mise-action@5228313ee0372e111a38da051671ca30fc5a96db # v3.6.3 + with: + version: v2026.2.11 + sha256: 3e1baedb9284124b770d2d561a04a98c343d05967c83deb8b35c7c941f8d9c9a + - name: Cache local Maven repository + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + - name: Verify both protobuf deps are updated + run: | + git fetch origin main + DIFF_POM=$(git diff origin/main -- pom.xml) + DIFF_MISE=$(git diff origin/main -- mise.toml) + if ! echo "$DIFF_POM" | grep -q 'protobuf-java.version'; then + echo "::error::protobuf-java not updated in pom.xml" + exit 1 + fi + if ! 
echo "$DIFF_MISE" | grep -q 'protoc'; then + echo "::error::protoc not updated in mise.toml" + exit 1 + fi + - name: Generate protobuf sources + run: mise run generate + - name: Commit and push generated sources + run: | + git diff --quiet && exit 0 + UNEXPECTED=$(git diff --name-only | grep -v '\.java$' || true) + if [[ -n "$UNEXPECTED" ]]; then + echo "::error::Unexpected files changed:" + echo "$UNEXPECTED" + exit 1 + fi + # Note: GITHUB_TOKEN pushes don't trigger CI re-runs. + # Close and reopen the PR to trigger CI after this commit. + # TODO: switch to PROMBOT_GITHUB_TOKEN once it's added to this repo. + git config user.name "github-actions[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + git add '*.java' + git commit -m "chore: regenerate protobuf sources" + git push diff --git a/.github/workflows/github-pages.yaml b/.github/workflows/github-pages.yaml index 6edc0639a..d774424ef 100644 --- a/.github/workflows/github-pages.yaml +++ b/.github/workflows/github-pages.yaml @@ -5,18 +5,18 @@ on: push: branches: - main + tags: + - "v*.*.*" # updates the version in the docs # Allows you to run this workflow manually from the Actions tab workflow_dispatch: -# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages -permissions: - contents: read - pages: write - id-token: write +permissions: {} -# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. -# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +# Allow only one concurrent deployment, skipping runs queued between +# the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow +# these production deployments to complete. 
concurrency: group: "pages" cancel-in-progress: false @@ -29,74 +29,45 @@ defaults: jobs: # Build job build: - runs-on: ubuntu-latest - env: - HUGO_VERSION: 0.115.4 - JAVA_HOME: /usr/lib/jvm/java-17-openjdk-amd64 + if: github.repository == 'prometheus/client_java' + runs-on: ubuntu-24.04 steps: - - name: Install OpenJDK 17 - run: sudo apt-get -q install -y openjdk-17-jdk - - name: Make 17 the default java version - run: sudo update-alternatives --set java /usr/lib/jvm/java-17-openjdk-amd64/bin/java - - name: Make 17 the default javadoc version - run: sudo update-alternatives --set javadoc /usr/lib/jvm/java-17-openjdk-amd64/bin/javadoc - - name: Print java and javadoc versions - run: | - echo 'java --version' && \ - java --version && \ - echo 'javadoc --version' && \ - javadoc --version && \ - echo 'echo $JAVA_HOME' && \ - echo $JAVA_HOME - - name: Install Hugo CLI - run: | - wget -O ${{ runner.temp }}/hugo.deb https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_linux-amd64.deb \ - && sudo dpkg -i ${{ runner.temp }}/hugo.deb - #- name: Install Dart Sass - # run: sudo snap install dart-sass - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: - submodules: recursive + persist-credentials: false + fetch-tags: "true" fetch-depth: 0 - - name: Build client_java - run: ./mvnw -B clean install -DskipTests - - name: Make Javadoc - run: ./mvnw -B clean compile javadoc:javadoc javadoc:aggregate - - name: Move the Javadoc to docs/static/api/ - run: mv ./target/site/apidocs ./docs/static/api && echo && echo 'ls ./docs/static/api' && ls ./docs/static/api + - uses: jdx/mise-action@5228313ee0372e111a38da051671ca30fc5a96db # v3.6.3 + with: + version: v2026.2.24 + sha256: c6e8b1abbd02d4beb7f38c98174e647b4ae40e89422465bc3b49e48c0bdf9ba9 + cache: "false" - name: Setup Pages id: pages - uses: actions/configure-pages@v3 - - name: Install Node.js dependencies - run: "[[ 
-f package-lock.json || -f npm-shrinkwrap.json ]] && npm ci || true" - working-directory: ./docs - - name: Build with Hugo + uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5 + - name: Build GitHub Pages + run: mise run build-gh-pages env: - # For maximum backward compatibility with Hugo modules - HUGO_ENVIRONMENT: production - HUGO_ENV: production - run: | - hugo \ - --gc \ - --minify \ - --baseURL "${{ steps.pages.outputs.base_url }}/" - working-directory: ./docs - - name: ls ./docs/public/api - run: echo 'ls ./docs/public/api' && ls ./docs/public/api + BASE_URL: "${{ steps.pages.outputs.base_url }}/" - name: Upload artifact - uses: actions/upload-pages-artifact@v1 + uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v4 with: path: ./docs/public + # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages # Deployment job deploy: + if: github.repository == 'prometheus/client_java' + permissions: + contents: read + pages: write + id-token: write environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: build steps: - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v2 + uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4 diff --git a/.github/workflows/issue-management-stale-action.yml b/.github/workflows/issue-management-stale-action.yml new file mode 100644 index 000000000..495904d22 --- /dev/null +++ b/.github/workflows/issue-management-stale-action.yml @@ -0,0 +1,59 @@ +--- +name: Issue management - run stale action + +on: + schedule: + # daily at 06:23 UTC + - cron: "23 6 * * *" + workflow_dispatch: + +permissions: {} + +concurrency: + group: "stale-issues-and-prs" + cancel-in-progress: false + +jobs: + stale: + permissions: + contents: read + actions: write # because actions/stale deletes its old cache before saving new one + issues: write # for actions/stale to close 
stale issues + pull-requests: write # for actions/stale to close stale PRs + runs-on: ubuntu-latest + steps: + # Handle stale PRs + # - After 120 days inactive: Adds "stale" label + warning comment + # - After 30 more days inactive: Closes + - uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0 + with: + days-before-issue-stale: -1 + days-before-issue-close: -1 + days-before-pr-stale: 120 + days-before-pr-close: 30 + stale-pr-label: stale + stale-pr-message: > + This PR has been marked as stale due to 120 days of inactivity. + It will be automatically closed if there is no further activity over the next 30 days. + close-pr-message: > + This PR was automatically closed due to lack of activity after being marked stale. Feel + free to reopen if you would like to continue working on it. + operations-per-run: 1000 + + # Handle stale issues + # - After 360 days (12 months) inactive: Adds "stale" label + warning comment + # - After 30 more days inactive: Closes + - uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0 + with: + days-before-issue-stale: 360 + days-before-issue-close: 30 + days-before-pr-stale: -1 + days-before-pr-close: -1 + stale-issue-label: stale + stale-issue-message: > + This issue has been marked as stale due to 12 months of inactivity. + It will be automatically closed if there is no further activity over the next 30 days. + close-issue-message: > + This issue was automatically closed due to lack of activity after being marked stale. + Feel free to reopen if you would like to continue working on it. 
+ operations-per-run: 1000 diff --git a/.github/workflows/java-version-matrix-tests.yml b/.github/workflows/java-version-matrix-tests.yml new file mode 100644 index 000000000..1d58513cc --- /dev/null +++ b/.github/workflows/java-version-matrix-tests.yml @@ -0,0 +1,84 @@ +--- +name: Integration Tests - Java Version Compatibility Matrix + +on: + pull_request: + paths: + - 'integration-tests/**' + - 'prometheus-metrics-core/**' + - 'prometheus-metrics-exporter-*/**' + - 'prometheus-metrics-exposition-*/**' + - '.github/workflows/java-version-matrix-tests.yml' + push: + branches: + - main + workflow_dispatch: + +permissions: {} + +jobs: + integration-tests: + name: Java ${{ matrix.java-version }} + runs-on: ubuntu-24.04 + strategy: + fail-fast: false + matrix: + # Note: Java 8 runtime testing is skipped due to Spotless incompatibility + java-version: [11, 17, 21, 25] + steps: + - name: Check out + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false + + - name: Set up mise + uses: jdx/mise-action@5228313ee0372e111a38da051671ca30fc5a96db # v3.6.3 + with: + version: v2026.2.24 + sha256: c6e8b1abbd02d4beb7f38c98174e647b4ae40e89422465bc3b49e48c0bdf9ba9 + + - name: Cache local Maven repository + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + + - name: Build core library artifacts + run: mise exec -- ./mvnw install -DskipTests -Dspotless.check.skip=true -Dcoverage.skip=true -Dcheckstyle.skip=true -Dwarnings=-nowarn -pl '!integration-tests' + + - name: Install parent POMs + run: | + cd integration-tests + mise exec -- ../mvnw clean install -N -Dspotless.skip=true + cd it-exporter + mise exec -- ../../mvnw install -N -Dspotless.skip=true + + - name: Rebuild sample apps targeting Java ${{ matrix.java-version }} + run: | + cd integration-tests + # Note: Jetty 12 
and Tomcat 11 require Java 17+, so servlet samples are skipped for Java 11 + if [ "${{ matrix.java-version }}" = "11" ]; then + MODULES="it-common,it-exporter/it-exporter-httpserver-sample,it-exporter/it-exporter-no-protobuf,it-pushgateway" + else + MODULES="it-common,it-exporter/it-exporter-httpserver-sample,it-exporter/it-exporter-servlet-tomcat-sample,it-exporter/it-exporter-servlet-jetty-sample,it-exporter/it-exporter-no-protobuf,it-pushgateway" + fi + mise exec -- ../mvnw clean install -DskipTests -Dspotless.skip=true -Dcoverage.skip=true -Dcheckstyle.skip=true -Dwarnings=-nowarn \ + -Djava.version=${{ matrix.java-version }} \ + -Dmaven.compiler.release=${{ matrix.java-version }} \ + -pl $MODULES + + - name: Run integration tests + env: + TEST_JAVA_VERSION: ${{ matrix.java-version }} + run: | + cd integration-tests + # Note: Servlet tests require Java 17+ (due to Jetty 12 and Tomcat 11) + if [ "${{ matrix.java-version }}" = "11" ]; then + TEST_MODULES="it-exporter/it-no-protobuf-test,it-pushgateway" + else + TEST_MODULES="it-exporter/it-exporter-test,it-exporter/it-no-protobuf-test,it-pushgateway" + fi + mise exec -- ../mvnw verify -T 2C -Dspotless.skip=true -Dcoverage.skip=true -Dcheckstyle.skip=true -Dwarnings=-nowarn \ + -pl $TEST_MODULES diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 000000000..37685e994 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,34 @@ +--- +name: Lint + +on: + pull_request: + +permissions: {} + +jobs: + lint: + runs-on: ubuntu-24.04 + + permissions: + contents: read + + steps: + - name: Checkout code + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false + fetch-depth: 0 # needed for git diff --merge-base in lint:links + + - name: Setup mise + uses: jdx/mise-action@5228313ee0372e111a38da051671ca30fc5a96db # v3.6.3 + with: + version: v2026.2.24 + sha256: c6e8b1abbd02d4beb7f38c98174e647b4ae40e89422465bc3b49e48c0bdf9ba9 + 
+ - name: Lint + env: + GITHUB_TOKEN: ${{ github.token }} + GITHUB_HEAD_SHA: ${{ github.event.pull_request.head.sha }} + PR_HEAD_REPO: ${{ github.event.pull_request.head.repo.full_name }} + run: mise run lint diff --git a/.github/workflows/multi-version-test.yml b/.github/workflows/multi-version-test.yml new file mode 100644 index 000000000..92318c1aa --- /dev/null +++ b/.github/workflows/multi-version-test.yml @@ -0,0 +1,39 @@ +--- +name: Java-Version Compatibility Tests + +on: [pull_request] + +permissions: {} + +jobs: + compatibility-test: + name: Test on Java ${{ matrix.java }} + runs-on: ubuntu-24.04 + strategy: + fail-fast: false + matrix: + java: [17, 21, 25] + steps: + - name: Check out + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false + + - name: Set up Java ${{ matrix.java }} + id: setup-java + uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5 + with: + distribution: "temurin" + java-version: ${{ matrix.java }} + + - name: Cache local Maven repository + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-java${{ matrix.java }}-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-java${{ matrix.java }}- + ${{ runner.os }}-maven- + + - name: Build and test on Java ${{ matrix.java }} + run: ./mvnw clean install -Dtest.java.version=${{ matrix.java }} -Dspotless.skip=true -Dcheckstyle.skip=true -Dwarnings=-nowarn -Dcoverage.skip=true diff --git a/.github/workflows/native-tests.yml b/.github/workflows/native-tests.yml new file mode 100644 index 000000000..fb49ce3ea --- /dev/null +++ b/.github/workflows/native-tests.yml @@ -0,0 +1,23 @@ +--- +name: GraalVM Native Tests + +on: [pull_request] + +permissions: {} + +jobs: + native-tests: + runs-on: ubuntu-24.04 + steps: + - name: Check out + with: + persist-credentials: false + uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + - uses: jdx/mise-action@5228313ee0372e111a38da051671ca30fc5a96db # v3.6.3 + with: + version: v2026.2.24 + sha256: c6e8b1abbd02d4beb7f38c98174e647b4ae40e89422465bc3b49e48c0bdf9ba9 + working_directory: .mise/envs/native + - name: Run native tests + working-directory: .mise/envs/native + run: mise native-test diff --git a/.github/workflows/nightly-benchmarks.yml b/.github/workflows/nightly-benchmarks.yml new file mode 100644 index 000000000..4954f8015 --- /dev/null +++ b/.github/workflows/nightly-benchmarks.yml @@ -0,0 +1,102 @@ +--- +name: Nightly Benchmarks + +on: + schedule: + # Run at 2 AM UTC every day + - cron: "0 2 * * *" + workflow_dispatch: + inputs: + jmh_args: + description: "Additional JMH arguments (e.g., '-f 1 -wi 1 -i 3' for quick run)" + required: false + default: "" + +permissions: {} + +concurrency: + group: "benchmarks" + +defaults: + run: + shell: bash + +jobs: + benchmark: + runs-on: ubuntu-24.04 + permissions: + contents: write + steps: + - name: Checkout main branch + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: true + fetch-depth: 0 + + - name: Setup mise + uses: jdx/mise-action@5228313ee0372e111a38da051671ca30fc5a96db # v3.6.3 + with: + version: v2026.2.24 + sha256: c6e8b1abbd02d4beb7f38c98174e647b4ae40e89422465bc3b49e48c0bdf9ba9 + + - name: Cache local Maven repository + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + + - name: Run JMH benchmarks + run: mise run benchmark:ci-json + env: + JMH_ARGS: ${{ github.event.inputs.jmh_args }} + + - name: Generate benchmark summary + run: | + mise run benchmark:generate-summary \ + --input benchmark-results.json \ + --output-dir benchmark-results \ + --commit-sha "${{ github.sha }}" + env: + GITHUB_REPOSITORY: ${{ 
github.repository }} + + - name: Commit and push results to benchmarks branch + run: | + # Save results to a temp location + mkdir -p /tmp/benchmark-output + cp -r benchmark-results/* /tmp/benchmark-output/ + + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + # Checkout or create benchmarks branch (use -- to disambiguate from benchmarks/ directory) + if git ls-remote --heads origin benchmarks | grep -q benchmarks; then + git fetch origin benchmarks + git switch benchmarks + # Preserve existing history + if [ -d history ]; then + cp -r history /tmp/benchmark-output/ + fi + else + git switch --orphan benchmarks + fi + + # Clean working directory + git rm -rf . 2>/dev/null || true + find . -mindepth 1 -maxdepth 1 ! -name '.git' -exec rm -rf {} + + + # Copy only the benchmark results + cp -r /tmp/benchmark-output/* . + + git add README.md results.json history/ + + DATE=$(date -u +"%Y-%m-%d") + COMMIT_SHORT=$(echo "${{ github.sha }}" | cut -c1-7) + + git commit \ + -m "Benchmark results for ${DATE} (${COMMIT_SHORT})" \ + -m "From commit ${{ github.sha }}" \ + || echo "No changes to commit" + + git push origin benchmarks --force-with-lease || git push origin benchmarks diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml new file mode 100644 index 000000000..76a13e3df --- /dev/null +++ b/.github/workflows/pr-title.yml @@ -0,0 +1,18 @@ +--- +name: PR Title + +on: + pull_request: + types: + - opened + - edited +permissions: + pull-requests: read + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: amannn/action-semantic-pull-request@48f256284bd46cdaab1048c3721360e808335d50 # v6.1.1 + env: + GITHUB_TOKEN: ${{ github.token }} diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml new file mode 100644 index 000000000..75cf34f16 --- /dev/null +++ b/.github/workflows/release-please.yml @@ -0,0 +1,23 @@ +--- +name: Release Please + +on: + push: + 
branches: + - main + +permissions: + contents: write + pull-requests: write + +jobs: + release-please: + if: ${{ github.repository == 'prometheus/client_java' }} + runs-on: ubuntu-24.04 + steps: + - uses: googleapis/release-please-action@16a9c90856f42705d54a6fda1823352bdc62cf38 # v4.4.0 + id: release-please + with: + token: ${{ secrets.GITHUB_TOKEN }} + config-file: .github/config/release-please-config.json + manifest-file: .github/config/.release-please-manifest.json diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 000000000..0db236d00 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,52 @@ +--- +name: Deploy to Maven Central + +on: + push: + tags: + - "v*.*.*" + +jobs: + deploy: + if: ${{ github.repository == 'prometheus/client_java' }} + runs-on: ubuntu-24.04 + permissions: {} + + steps: + - name: Debug gpg key + env: + GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }} + run: | + echo "${#GPG_SIGNING_KEY}" + echo "${GPG_SIGNING_KEY}" | gpg --batch --import-options import-show --import + - name: Checkout Plugin Repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false + + - uses: jdx/mise-action@5228313ee0372e111a38da051671ca30fc5a96db # v3.6.3 + with: + version: v2026.2.24 + sha256: c6e8b1abbd02d4beb7f38c98174e647b4ae40e89422465bc3b49e48c0bdf9ba9 + cache: false + + - name: Build release version + run: mise run build-release + + - name: Set up Apache Maven Central + uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5 + with: + distribution: "temurin" + java-version: "21" + server-id: ossrh + server-username: MAVEN_USERNAME + server-password: MAVEN_CENTRAL_TOKEN + gpg-private-key: ${{ secrets.GPG_SIGNING_KEY }} + gpg-passphrase: MAVEN_GPG_PASSPHRASE + + - name: Publish to Apache Maven Central + run: mvn deploy -P 'release,!default' -Dmaven.test.skip=true + env: + MAVEN_USERNAME: ${{ secrets.SONATYPE_MAVEN_REPOSITORY_USERNAME 
}} + MAVEN_CENTRAL_TOKEN: ${{ secrets.SONATYPE_MAVEN_REPOSITORY_PASSWORD }} + MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_SIGNING_PASSPHRASE }} diff --git a/.github/workflows/test-release-build.yml b/.github/workflows/test-release-build.yml new file mode 100644 index 000000000..d58b682a7 --- /dev/null +++ b/.github/workflows/test-release-build.yml @@ -0,0 +1,37 @@ +--- +name: Test Build Release + +on: + push: + branches: ["main"] + pull_request: + branches: ["main"] + +permissions: {} + +jobs: + build: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false + fetch-tags: "true" + fetch-depth: 0 + - uses: jdx/mise-action@5228313ee0372e111a38da051671ca30fc5a96db # v3.6.3 + with: + version: v2026.2.24 + sha256: c6e8b1abbd02d4beb7f38c98174e647b4ae40e89422465bc3b49e48c0bdf9ba9 + - name: Cache local Maven repository + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + - name: Build GitHub Pages + run: mise run build-gh-pages + env: + BASE_URL: "/client_java" + - name: Build release version + run: mise run build-release diff --git a/.gitignore b/.gitignore index 14ec2f863..b98fa5703 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ *.iml target/ +build/ simpleclient_pushgateway/mockserver.log simpleclient_pushgateway/mockserver_request.log nb-configuration.xml @@ -17,3 +18,11 @@ dependency-reduced-pom.xml **/.classpath **.project **/.settings/ +docs/public +.lycheecache + +benchmark-results/ +benchmark-results.json +benchmark-output.log + +*.DS_Store \ No newline at end of file diff --git a/.gitleaksignore b/.gitleaksignore new file mode 100644 index 000000000..605fefa97 --- /dev/null +++ b/.gitleaksignore @@ -0,0 +1,2 @@ +/tmp/lint/integration-tests/it-pushgateway/src/test/resources/pushgateway-ssl.yaml:private-key:36 
+/github/workspace/integration-tests/it-pushgateway/src/test/resources/pushgateway-ssl.yaml:private-key:36 diff --git a/.idea/icon.svg b/.idea/icon.svg new file mode 100644 index 000000000..5c51f66d9 --- /dev/null +++ b/.idea/icon.svg @@ -0,0 +1,50 @@ + + + +image/svg+xml \ No newline at end of file diff --git a/.mise/envs/native/mise.toml b/.mise/envs/native/mise.toml new file mode 100644 index 000000000..de9c06a68 --- /dev/null +++ b/.mise/envs/native/mise.toml @@ -0,0 +1,8 @@ +[tools] +java = "oracle-graalvm-25.0.1" + +[tasks.native-test] +depends = "build" +run = "../../mvnw test -PnativeTest" +dir = "../../../integration-tests/it-spring-boot-smoke-test" + diff --git a/.mise/tasks/build-release.sh b/.mise/tasks/build-release.sh new file mode 100755 index 000000000..620dca77a --- /dev/null +++ b/.mise/tasks/build-release.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +#MISE description="Build release package" + +set -euo pipefail + +mvn -B package -P 'release,!default,!examples-and-integration-tests' \ + -Dmaven.test.skip=true -Dgpg.skip=true diff --git a/.mise/tasks/generate_benchmark_summary.py b/.mise/tasks/generate_benchmark_summary.py new file mode 100644 index 000000000..0b0c4fb01 --- /dev/null +++ b/.mise/tasks/generate_benchmark_summary.py @@ -0,0 +1,378 @@ +#!/usr/bin/env python3 + +# [MISE] description="Generate markdown summary from JMH benchmark JSON results" +# [MISE] alias="generate-benchmark-summary" + +""" +Generate a markdown summary from JMH benchmark JSON results. + +Usage: + python3 .mise/tasks/generate_benchmark_summary.py [--input results.json] [--output-dir ./benchmark-results] + +This script: +1. Reads JMH JSON output +2. Generates a README.md with formatted tables +3. 
Copies results to the output directory with historical naming +""" + +import argparse +import json +import os +import shutil +import sys +from datetime import datetime, timezone +from pathlib import Path +from typing import Dict, List, Optional + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Generate benchmark summary from JMH JSON" + ) + parser.add_argument( + "--input", + default="benchmark-results.json", + help="Path to JMH JSON results file (default: benchmark-results.json)", + ) + parser.add_argument( + "--output-dir", + default="benchmark-results", + help="Output directory for results (default: benchmark-results)", + ) + parser.add_argument( + "--commit-sha", + default=None, + help="Git commit SHA (default: read from git or 'local')", + ) + return parser.parse_args() + + +def get_system_info() -> Dict[str, str]: + """Capture system hardware information.""" + import multiprocessing + import platform + + info = {} + + try: + info["cpu_cores"] = str(multiprocessing.cpu_count()) + except Exception: + pass + + try: + with open("/proc/cpuinfo", "r") as f: + for line in f: + if line.startswith("model name"): + info["cpu_model"] = line.split(":")[1].strip() + break + except FileNotFoundError: + # macOS + try: + import subprocess + + result = subprocess.run( + ["sysctl", "-n", "machdep.cpu.brand_string"], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + info["cpu_model"] = result.stdout.strip() + except Exception: + pass + + try: + with open("/proc/meminfo", "r") as f: + for line in f: + if line.startswith("MemTotal"): + kb = int(line.split()[1]) + info["memory_gb"] = str(round(kb / 1024 / 1024)) + break + except FileNotFoundError: + # macOS + try: + import subprocess + + result = subprocess.run( + ["sysctl", "-n", "hw.memsize"], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + bytes_mem = int(result.stdout.strip()) + info["memory_gb"] = str(round(bytes_mem / 1024 / 1024 / 
1024)) + except Exception: + pass + + info["os"] = f"{platform.system()} {platform.release()}" + + return info + + +def get_commit_sha(provided_sha: Optional[str]) -> str: + """Get commit SHA from argument, git, or return 'local'.""" + if provided_sha: + return provided_sha + + try: + import subprocess + + result = subprocess.run( + ["git", "rev-parse", "HEAD"], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + return result.stdout.strip() + except Exception: + pass + + return "local" + + +def format_score(score) -> str: + """Format score with appropriate precision.""" + try: + val = float(score) + if val >= 1_000_000: + return f"{val / 1_000_000:.2f}M" + elif val >= 1_000: + return f"{val / 1_000:.2f}K" + else: + return f"{val:.2f}" + except (ValueError, TypeError): + return str(score) + + +def format_error(error) -> str: + """Format error value, handling NaN.""" + try: + error_val = float(error) + if error_val != error_val: # NaN check + return "" + elif error_val >= 1_000: + return f"± {error_val / 1_000:.2f}K" + else: + return f"± {error_val:.2f}" + except (ValueError, TypeError): + return "" + + +def generate_markdown(results: List, commit_sha: str, repo: str) -> str: + """Generate markdown summary from JMH results.""" + commit_short = commit_sha[:7] + datetime_str = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + + # Extract metadata from first result + first = results[0] if results else {} + jdk_version = first.get("jdkVersion", "unknown") + vm_name = first.get("vmName", "unknown") + threads = first.get("threads", "?") + forks = first.get("forks", "?") + warmup_iters = first.get("warmupIterations", "?") + measure_iters = first.get("measurementIterations", "?") + + sysinfo = get_system_info() + + md = [] + md.append("# Prometheus Java Client Benchmarks") + md.append("") + + md.append("## Run Information") + md.append("") + md.append(f"- **Date:** {datetime_str}") + if commit_sha != "local": + md.append( + f"- 
**Commit:** [`{commit_short}`](https://github.com/{repo}/commit/{commit_sha})" + ) + else: + md.append(f"- **Commit:** `{commit_short}` (local run)") + md.append(f"- **JDK:** {jdk_version} ({vm_name})") + bench_cfg = f"{forks} fork(s), {warmup_iters} warmup, {measure_iters} measurement, {threads} threads" + md.append(f"- **Benchmark config:** {bench_cfg}") + + hw_parts = [] + if sysinfo.get("cpu_model"): + hw_parts.append(sysinfo["cpu_model"]) + if sysinfo.get("cpu_cores"): + hw_parts.append(f"{sysinfo['cpu_cores']} cores") + if sysinfo.get("memory_gb"): + hw_parts.append(f"{sysinfo['memory_gb']} GB RAM") + if hw_parts: + md.append(f"- **Hardware:** {', '.join(hw_parts)}") + if sysinfo.get("os"): + md.append(f"- **OS:** {sysinfo['os']}") + + md.append("") + + # Group by benchmark class + benchmarks_by_class: Dict[str, List] = {} + for b in results: + name = b.get("benchmark", "") + parts = name.rsplit(".", 1) + if len(parts) == 2: + class_name, method = parts + class_short = class_name.split(".")[-1] + else: + class_short = "Other" + benchmarks_by_class.setdefault(class_short, []).append(b) + + md.append("## Results") + md.append("") + + # Generate table for each class + for class_name in sorted(benchmarks_by_class.keys()): + benchmarks = benchmarks_by_class[class_name] + md.append(f"### {class_name}") + md.append("") + + # Sort by score descending + sorted_benchmarks = sorted( + benchmarks, + key=lambda x: x.get("primaryMetric", {}).get("score", 0), + reverse=True, + ) + + md.append("| Benchmark | Score | Error | Units | |") + md.append("|:----------|------:|------:|:------|:---|") + + best_score = ( + sorted_benchmarks[0].get("primaryMetric", {}).get("score", 1) + if sorted_benchmarks + else 1 + ) + + for i, b in enumerate(sorted_benchmarks): + name = b.get("benchmark", "").split(".")[-1] + score = b.get("primaryMetric", {}).get("score", 0) + error = b.get("primaryMetric", {}).get("scoreError", 0) + unit = b.get("primaryMetric", {}).get("scoreUnit", "ops/s") + + 
score_fmt = format_score(score) + error_fmt = format_error(error) + + # Calculate relative performance as multiplier + try: + if i == 0: + relative_fmt = "**fastest**" + else: + multiplier = float(best_score) / float(score) + if multiplier >= 10: + relative_fmt = f"{multiplier:.0f}x slower" + else: + relative_fmt = f"{multiplier:.1f}x slower" + except (ValueError, TypeError, ZeroDivisionError): + relative_fmt = "" + + md.append( + f"| {name} | {score_fmt} | {error_fmt} | {unit} | {relative_fmt} |" + ) + + md.append("") + + md.append("### Raw Results") + md.append("") + md.append("```") + md.append( + f"{'Benchmark':<50} {'Mode':>6} {'Cnt':>4} {'Score':>14} {'Error':>12} Units" + ) + + for b in sorted(results, key=lambda x: x.get("benchmark", "")): + name = b.get("benchmark", "").replace("io.prometheus.metrics.benchmarks.", "") + mode = b.get("mode", "thrpt") + cnt = b.get("measurementIterations", 0) * b.get("forks", 1) + score = b.get("primaryMetric", {}).get("score", 0) + error = b.get("primaryMetric", {}).get("scoreError", 0) + unit = b.get("primaryMetric", {}).get("scoreUnit", "ops/s") + + try: + score_str = f"{float(score):.3f}" + except (ValueError, TypeError): + score_str = str(score) + + try: + error_val = float(error) + if error_val != error_val: # NaN + error_str = "" + else: + error_str = f"± {error_val:.3f}" + except (ValueError, TypeError): + error_str = "" + + md.append( + f"{name:<50} {mode:>6} {cnt:>4} {score_str:>14} {error_str:>12} {unit}" + ) + + md.append("```") + md.append("") + + md.append("## Notes") + md.append("") + md.append("- **Score** = Throughput in operations per second (higher is better)") + md.append("- **Error** = 99.9% confidence interval") + md.append("") + + md.append("## Benchmark Descriptions") + md.append("") + md.append("| Benchmark | Description |") + md.append("|:----------|:------------|") + md.append( + "| **CounterBenchmark** | Counter increment performance: " + "Prometheus, OpenTelemetry, simpleclient, Codahale |" + ) + 
md.append( + "| **HistogramBenchmark** | Histogram observation performance " + "(classic vs native/exponential) |" + ) + md.append( + "| **TextFormatUtilBenchmark** | Metric exposition format writing speed |" + ) + md.append("") + return "\n".join(md) + + +def main(): + args = parse_args() + + input_path = Path(args.input) + if not input_path.exists(): + print(f"Error: Input file not found: {input_path}") + sys.exit(1) + + print(f"Reading results from: {input_path}") + with open(input_path, "r") as f: + results = json.load(f) + + print(f"Found {len(results)} benchmark results") + + commit_sha = get_commit_sha(args.commit_sha) + commit_short = commit_sha[:7] + repo = os.environ.get("GITHUB_REPOSITORY", "prometheus/client_java") + + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + history_dir = output_dir / "history" + history_dir.mkdir(parents=True, exist_ok=True) + + results_json_path = output_dir / "results.json" + shutil.copy(input_path, results_json_path) + print(f"Copied results to: {results_json_path}") + + date_str = datetime.now(timezone.utc).strftime("%Y-%m-%d") + history_path = history_dir / f"{date_str}-{commit_short}.json" + shutil.copy(input_path, history_path) + print(f"Saved historical entry: {history_path}") + + markdown = generate_markdown(results, commit_sha, repo) + readme_path = output_dir / "README.md" + with open(readme_path, "w") as f: + f.write(markdown) + print(f"Generated summary: {readme_path}") + + print(f"\nDone! 
Results are in: {output_dir}/") + + +if __name__ == "__main__": + main() diff --git a/.mise/tasks/lint/bom.py b/.mise/tasks/lint/bom.py new file mode 100755 index 000000000..d77b88e23 --- /dev/null +++ b/.mise/tasks/lint/bom.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 + +# [MISE] description="Make sure the BOM has all necessary modules" + +import difflib +import re +import sys +from fnmatch import fnmatch +from pathlib import Path +from typing import List + +ROOT = Path(__file__).resolve().parents[3] # repo root (.. from .mise/tasks/lint) +IGNORE_DIRS = {"prometheus-metrics-parent"} +MODULE_PREFIX = "prometheus-metrics" +BOM_POM = ROOT / "prometheus-metrics-bom" / "pom.xml" + + +def first_artifact_id(pom_file: Path) -> str: + """Return the second <artifactId> value from the given pom.xml (matches original script). + + The original shell function greps all <artifactId> lines and returns the second one + (head -n 2 | tail -n 1). We replicate that behavior exactly. + """ + if not pom_file.is_file(): + raise FileNotFoundError(f"File {pom_file} does not exist.") + + text = pom_file.read_text(encoding="utf-8") + matches = re.findall(r"<artifactId>\s*(.*?)\s*</artifactId>", text) + if len(matches) < 2: + return "" + return matches[1].strip() + + +def add_dir(dir_path: Path, want: List[str]): + if not dir_path.is_dir(): + raise FileNotFoundError(f"Directory {dir_path} does not exist.") + + if any(dir_path.name == ig for ig in IGNORE_DIRS): + return + + pom = dir_path / "pom.xml" + if not pom.is_file(): + raise FileNotFoundError(f"File {pom} does not exist.") + + artifact_id = first_artifact_id(pom) + if not artifact_id: + raise RuntimeError(f"No artifactId found in {pom}") + + want.append(artifact_id) + + +def collect_want(root: Path) -> List[str]: + want: List[str] = [] + # top-level prometheus-metrics* + for entry in sorted(root.iterdir()): + if entry.is_dir() and fnmatch(entry.name, f"{MODULE_PREFIX}*"): + add_dir(entry, want) + + # prometheus-metrics-tracer/prometheus-metrics* + tracer_dir = root / 
"prometheus-metrics-tracer" + if tracer_dir.is_dir(): + for entry in sorted(tracer_dir.iterdir()): + if entry.is_dir() and fnmatch(entry.name, f"{MODULE_PREFIX}*"): + add_dir(entry, want) + + # deduplicate and sort + want_unique = sorted(set(want)) + return want_unique + + +def collect_have(bom_pom: Path) -> List[str]: + if not bom_pom.is_file(): + raise FileNotFoundError(f"BOM file {bom_pom} does not exist.") + + text = bom_pom.read_text(encoding="utf-8") + # find artifactId values that start with MODULE_PREFIX + matches = re.findall( + r"\s*(%s[^<\s]*)\s*" % re.escape(MODULE_PREFIX), text + ) + return sorted(matches) + + +def main() -> int: + try: + want = collect_want(ROOT) + have = collect_have(BOM_POM) + + want_text = "\n".join(want) + have_text = "\n".join(have) + + if want_text != have_text: + print( + "The BOM file prometheus-metrics-bom/bom.xml does not match the current directory contents." + ) + print("Expected:") + print(want_text) + print("Found:") + print(have_text) + print() + diff = difflib.unified_diff( + have_text.splitlines(keepends=True), + want_text.splitlines(keepends=True), + fromfile="found", + tofile="expected", + ) + sys.stdout.writelines(diff) + return 1 + else: + return 0 + + except Exception as e: + print(e, file=sys.stderr) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/.mise/tasks/lint/example-poms.py b/.mise/tasks/lint/example-poms.py new file mode 100755 index 000000000..f9b5b776b --- /dev/null +++ b/.mise/tasks/lint/example-poms.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 + +# [MISE] description="Verify standalone example POMs won't break spotless" + +"""Check that standalone example modules don't break 'mise run format'. + +Example modules are intentionally standalone (no from the project) +so users can copy them. But they're included in the Maven reactor via the +examples-and-integration-tests profile. If 'mise run format' doesn't +exclude them, spotless:apply fails because the plugin isn't declared. 
+ +This lint verifies that every standalone example POM is excluded from +the format task in mise.toml. +""" + +import re +import sys +from pathlib import Path + +ROOT = Path(__file__).resolve().parents[3] +EXAMPLES_DIR = ROOT / "examples" + + +def find_standalone_example_poms() -> list[Path]: + """Find example pom.xml files that don't inherit from the project parent.""" + standalone = [] + for pom in sorted(EXAMPLES_DIR.rglob("pom.xml")): + if "target" in pom.parts: + continue + text = pom.read_text(encoding="utf-8") + # Check if this POM has a with the project's groupId/artifactId + has_project_parent = bool( + re.search( + r"\s*io\.prometheus\s*" + r"client_java", + text, + ) + ) + if not has_project_parent: + standalone.append(pom) + return standalone + + +def format_task_excludes_examples() -> bool: + """Check that the format task in mise.toml excludes standalone examples.""" + mise_toml = ROOT / "mise.toml" + text = mise_toml.read_text(encoding="utf-8") + # Look for the format task run command + match = re.search(r'\[tasks\.format\].*?run\s*=\s*"([^"]*)"', text, re.DOTALL) + if not match: + return False + run_cmd = match.group(1) + # The command should deactivate the examples-and-integration-tests profile + return "!examples-and-integration-tests" in run_cmd + + +def main() -> int: + standalone = find_standalone_example_poms() + if not standalone: + return 0 + + if format_task_excludes_examples(): + return 0 + + print("ERROR: Standalone example POMs found but 'mise run format'") + print("does not exclude the examples-and-integration-tests profile.") + print() + print("Standalone example POMs (no project parent):") + for pom in standalone: + print(f" {pom.relative_to(ROOT)}") + print() + print("Fix: ensure the format task in mise.toml deactivates the") + print("examples-and-integration-tests profile, e.g.:") + print(" -P '!examples-and-integration-tests'") + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git 
a/.mise/tasks/set-release-version-github-pages.sh b/.mise/tasks/set-release-version-github-pages.sh new file mode 100755 index 000000000..2016373c8 --- /dev/null +++ b/.mise/tasks/set-release-version-github-pages.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +#MISE description="Set release version in all GitHub Pages docs" + +set -euox pipefail + +version=$(git tag -l | grep 'v' | sort | tail -1 | sed 's/v//') +otelVersion=$(grep -oP '\K[^<]+' pom.xml | sed 's/-alpha$//') + +find ./docs/content -name '*.md' \ + -exec sed -i "s/\$version/$version/g" {} + \ + -exec sed -i "s/\$otelVersion/$otelVersion/g" {} + diff --git a/.mise/tasks/test_update-benchmarks.py b/.mise/tasks/test_update-benchmarks.py new file mode 100644 index 000000000..3d80b4983 --- /dev/null +++ b/.mise/tasks/test_update-benchmarks.py @@ -0,0 +1,98 @@ +import os +import re +import sys +import tempfile +import unittest + +from update_benchmarks import update_pre_blocks_under_module + +# Ensure the tasks directory is importable when running the test directly +here = os.path.dirname(__file__) +if here not in sys.path: + sys.path.insert(0, here) + + +class TestRunBenchmarksFiltering(unittest.TestCase): + def setUp(self): + # sample JMH table with mixed-class lines + self.table = ( + "Benchmark Mode Cnt Score Error Units\n" + "CounterBenchmark.codahaleIncNoLabels thrpt 57881.585 ops/s\n" + "HistogramBenchmark.prometheusNative thrpt 2385.134 ops/s\n" + "TextFormatUtilBenchmark.prometheusWriteToNull thrpt 885331.328 ops/s\n" + "CounterBenchmark.prometheusInc thrpt 54090.469 ops/s\n" + ) + + # create temp dir to act as module path + self.tmpdir = tempfile.TemporaryDirectory() + self.module_path = self.tmpdir.name + + # Create three files with a javadoc
 block that contains mixed results
+        self.files = {}
+        javadoc_pre = (
+            "/**\n"
+            " * Example javadoc\n"
+            " * 
\n"
+            " * Benchmark                                             Mode  Cnt       Score   Error  Units\n"
+            " * CounterBenchmark.codahaleIncNoLabels                 thrpt        57881.585          ops/s\n"
+            " * HistogramBenchmark.prometheusNative                  thrpt         2385.134          ops/s\n"
+            " * TextFormatUtilBenchmark.prometheusWriteToNull        thrpt       885331.328          ops/s\n"
+            " * CounterBenchmark.prometheusInc                       thrpt        54090.469          ops/s\n"
+            " * 
\n" + " */\n" + ) + + for cls in ( + "CounterBenchmark", + "HistogramBenchmark", + "TextFormatUtilBenchmark", + ): + fname = os.path.join(self.module_path, f"{cls}.java") + with open(fname, "w", encoding="utf-8") as f: + f.write(javadoc_pre) + f.write(f"public class {cls} {{}}\n") + self.files[cls] = fname + + def tearDown(self): + self.tmpdir.cleanup() + + def _read_pre_contents(self, path): + with open(path, "r", encoding="utf-8") as f: + content = f.read() + m = re.search(r"
\n([\s\S]*?)
", content) + return m.group(1) if m else "" + + def test_update_only_inserts_matching_class_lines(self): + updated = update_pre_blocks_under_module(self.module_path, self.table) + # All three files should be updated + self.assertEqual( + set(os.path.basename(p) for p in updated), + { + os.path.basename(self.files["CounterBenchmark"]), + os.path.basename(self.files["HistogramBenchmark"]), + os.path.basename(self.files["TextFormatUtilBenchmark"]), + }, + ) + + # Verify CounterBenchmark file contains only CounterBenchmark lines + cb_pre = self._read_pre_contents(self.files["CounterBenchmark"]) + self.assertIn("CounterBenchmark.codahaleIncNoLabels", cb_pre) + self.assertIn("CounterBenchmark.prometheusInc", cb_pre) + self.assertNotIn("HistogramBenchmark.prometheusNative", cb_pre) + self.assertNotIn("TextFormatUtilBenchmark.prometheusWriteToNull", cb_pre) + + # Verify HistogramBenchmark contains only its line + hb_pre = self._read_pre_contents(self.files["HistogramBenchmark"]) + self.assertIn("HistogramBenchmark.prometheusNative", hb_pre) + self.assertNotIn("CounterBenchmark.codahaleIncNoLabels", hb_pre) + self.assertNotIn("TextFormatUtilBenchmark.prometheusWriteToNull", hb_pre) + + # Verify TextFormatUtilBenchmark contains only its line + tf_pre = self._read_pre_contents(self.files["TextFormatUtilBenchmark"]) + self.assertIn("TextFormatUtilBenchmark.prometheusWriteToNull", tf_pre) + self.assertNotIn("CounterBenchmark.prometheusInc", tf_pre) + self.assertNotIn("HistogramBenchmark.prometheusNative", tf_pre) + + +if __name__ == "__main__": + unittest.main() diff --git a/.mise/tasks/update_benchmarks.py b/.mise/tasks/update_benchmarks.py new file mode 100755 index 000000000..3cb550877 --- /dev/null +++ b/.mise/tasks/update_benchmarks.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 + +# [MISE] description="Run and update JMH benchmark outputs in the benchmarks module" +# [MISE] alias="update-benchmarks" + +""" +Run benchmarks for the `benchmarks` module, capture JMH text 
output, and update +any
...
blocks containing "thrpt" under the `benchmarks/` module +(files such as Java sources with embedded example output in javadocs). + +Usage: ./.mise/tasks/update_benchmarks.py [--mvnw ./mvnw] [--module benchmarks] [--java java] + [--jmh-args "-f 1 -wi 0 -i 1"] + +By default this will: + - run the maven wrapper to package the benchmarks: `./mvnw -pl benchmarks -am -DskipTests package` + - locate the shaded jar under `benchmarks/target/` (named containing "benchmarks") + - run `java -jar -rf text` (add extra JMH args with --jmh-args) + - parse the first JMH table (the block starting with the "Benchmark Mode" header) + - update all files under the `benchmarks/` directory which contain a `
` block with the substring "thrpt"
+
+This script is careful to preserve Javadoc comment prefixes like " * " when replacing the
+contents of the 
 block.
+"""
+
+import argparse
+import glob
+import os
+import re
+import shlex
+import subprocess
+import sys
+from typing import List, Optional
+
+
+def run_cmd(cmd: List[str], cwd: Optional[str] = None) -> str:
+    """Run a command, stream stdout/stderr to the console for progress, and return the full output.
+
+    This replaces the previous blocking subprocess.run approach so users can see build / JMH
+    progress in real time while the command runs.
+    """
+    try:
+        proc = subprocess.Popen(
+            cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
+        )
+    except FileNotFoundError:
+        # Helpful message if the executable is not found
+        print(f"Command not found: {cmd[0]}")
+        raise
+
+    output_lines: List[str] = []
+    try:
+        assert proc.stdout is not None
+        # Stream lines as they appear and capture them for returning
+        for line in proc.stdout:
+            # Print immediately so callers (and CI) can observe progress
+            print(line, end="")
+            output_lines.append(line)
+        proc.wait()
+    except KeyboardInterrupt:
+        # If the user interrupts, ensure the child process is terminated
+        proc.kill()
+        proc.wait()
+        print("\nCommand interrupted by user.")
+        raise
+
+    output = "".join(output_lines)
+    if proc.returncode != 0:
+        print(
+            f"Command failed: {' '.join(cmd)}\nExit: {proc.returncode}\nOutput:\n{output}"
+        )
+        raise SystemExit(proc.returncode)
+    return output
+
+
+def build_benchmarks(mvnw: str, module: str) -> None:
+    print(f"Building Maven module '{module}' using {mvnw} (this may take a while)...")
+    cmd = [mvnw, "-pl", module, "-am", "-DskipTests", "clean", "package"]
+    run_cmd(cmd)
+    print("Build completed.")
+
+
+def find_benchmarks_jar(module: str) -> str:
+    pattern = os.path.join(module, "target", "*.jar")
+    jars = [p for p in glob.glob(pattern) if "original" not in p and p.endswith(".jar")]
+    # prefer jar whose basename contains module name
+    jars_pref = [j for j in jars if module in os.path.basename(j)]
+    chosen = (jars_pref or jars)[:1]
+    if not chosen:
+        raise FileNotFoundError(
+            f"No jar found in {os.path.join(module, 'target')} (tried: {pattern})"
+        )
+    jar = chosen[0]
+    print(f"Using jar: {jar}")
+    return jar
+
+
+def run_jmh(jar: str, java_cmd: str, extra_args: Optional[str]) -> str:
+    args = [java_cmd, "-jar", jar, "-rf", "text"]
+    if extra_args:
+        args += shlex.split(extra_args)
+    print(f"Running JMH: {' '.join(args)}")
+    output = run_cmd(args)
+    print("JMH run completed.")
+    return output
+
+
+def extract_first_table(jmh_output: str) -> str:
+    # Try to extract the first table that starts with "Benchmark" header and continues until a blank line
+    m = re.search(r"(\nBenchmark\s+Mode[\s\S]*?)(?:\n\s*\n|\Z)", jmh_output)
+    if not m:
+        # fallback: collect all lines that contain 'thrpt' plus a header if present
+        lines = [line for line in jmh_output.splitlines() if "thrpt" in line]
+        if not lines:
+            raise ValueError('Could not find any "thrpt" lines in JMH output')
+        # try to find header
+        header = next(
+            (
+                line
+                for line in jmh_output.splitlines()
+                if line.startswith("Benchmark") and "Mode" in line
+            ),
+            "Benchmark                                     Mode  Cnt      Score     Error  Units",
+        )
+        return header + "\n" + "\n".join(lines)
+    table = m.group(1).strip("\n")
+    # Ensure we return only the table lines (remove any leading iteration info lines that JMH sometimes prints)
+    # Normalize spaces: keep as-is
+    return table
+
+
+def filter_table_for_class(table: str, class_name: str) -> Optional[str]:
+    """
+    Return a table string that contains only the header and the lines belonging to `class_name`.
+    If no matching lines are found, return None.
+    """
+    lines = table.splitlines()
+    # find header line index (starts with 'Benchmark' and contains 'Mode')
+    header_idx = None
+    for i, ln in enumerate(lines):
+        if ln.strip().startswith("Benchmark") and "Mode" in ln:
+            header_idx = i
+            break
+    header = (
+        lines[header_idx]
+        if header_idx is not None
+        else "Benchmark                                     Mode  Cnt      Score     Error  Units"
+    )
+
+    matched = []
+    pattern = re.compile(r"^\s*" + re.escape(class_name) + r"\.")
+    for ln in lines[header_idx + 1 if header_idx is not None else 0 :]:
+        if "thrpt" in ln and pattern.search(ln):
+            matched.append(ln)
+
+    if not matched:
+        return None
+    return header + "\n" + "\n".join(matched)
+
+
+def update_pre_blocks_under_module(module: str, table: str) -> List[str]:
+    # Find files under module and update any 
...
block that contains 'thrpt' + updated_files = [] + for path in glob.glob(os.path.join(module, "**"), recursive=True): + if os.path.isdir(path): + continue + try: + with open(path, "r", encoding="utf-8") as f: + content = f.read() + except Exception: + continue + # quick filter + if "
" not in content or "thrpt" not in content:
+            continue
+
+        original = content
+
+        # Determine the class name from the filename (e.g. TextFormatUtilBenchmark.java -> TextFormatUtilBenchmark)
+        base = os.path.basename(path)
+        class_name = os.path.splitext(base)[0]
+
+        # Build a filtered table for this class; if no matching lines, skip updating this file
+        filtered_table = filter_table_for_class(table, class_name)
+        if filtered_table is None:
+            # nothing to update for this class
+            continue
+
+        # Regex to find any line-starting Javadoc prefix like " * " before 
+        # This will match patterns like: " * 
... 
" and capture the prefix (e.g. " * ") + pattern = re.compile(r"(?m)^(?P[ \t]*\*[ \t]*)
[\s\S]*?
") + + def repl(m: re.Match) -> str: + prefix = m.group("prefix") + # Build the new block with the same prefix on each line + lines = filtered_table.splitlines() + replaced = prefix + "
\n"
+            for ln in lines:
+                replaced += prefix + ln.rstrip() + "\n"
+            replaced += prefix + "
" + return replaced + + new_content, nsubs = pattern.subn(repl, content) + if nsubs > 0 and new_content != original: + with open(path, "w", encoding="utf-8") as f: + f.write(new_content) + updated_files.append(path) + print(f"Updated {path}: replaced {nsubs}
 block(s)")
+    return updated_files
+
+
+def main(argv: List[str]):
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--mvnw", default="./mvnw", help="Path to maven wrapper")
+    parser.add_argument(
+        "--module", default="benchmarks", help="Module directory to build/run"
+    )
+    parser.add_argument("--java", default="java", help="Java command")
+    parser.add_argument(
+        "--jmh-args",
+        default="",
+        help='Extra arguments to pass to the JMH main (e.g. "-f 1 -wi 0 -i 1")',
+    )
+    args = parser.parse_args(argv)
+
+    build_benchmarks(args.mvnw, args.module)
+    jar = find_benchmarks_jar(args.module)
+    output = run_jmh(jar, args.java, args.jmh_args)
+
+    # Print a short preview of the JMH output
+    preview = "\n".join(output.splitlines()[:120])
+    print("\n--- JMH output preview ---")
+    print(preview)
+    print("--- end preview ---\n")
+
+    table = extract_first_table(output)
+
+    updated = update_pre_blocks_under_module(args.module, table)
+
+    if not updated:
+        print(
+            'No files were updated (no 
 blocks with "thrpt" found under the module).'
+        )
+    else:
+        print("\nUpdated files:")
+        for p in updated:
+            print(" -", p)
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
diff --git a/.mvn/jvm.config b/.mvn/jvm.config
new file mode 100644
index 000000000..32599cefe
--- /dev/null
+++ b/.mvn/jvm.config
@@ -0,0 +1,10 @@
+--add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.model=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED
+--add-opens jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED
+--add-opens jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED
diff --git a/.mvn/wrapper/MavenWrapperDownloader.java b/.mvn/wrapper/MavenWrapperDownloader.java
deleted file mode 100644
index b901097f2..000000000
--- a/.mvn/wrapper/MavenWrapperDownloader.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright 2007-present the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import java.net.*;
-import java.io.*;
-import java.nio.channels.*;
-import java.util.Properties;
-
-public class MavenWrapperDownloader {
-
-    private static final String WRAPPER_VERSION = "0.5.6";
-    /**
-     * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
-     */
-    private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
-        + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
-
-    /**
-     * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
-     * use instead of the default one.
-     */
-    private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
-            ".mvn/wrapper/maven-wrapper.properties";
-
-    /**
-     * Path where the maven-wrapper.jar will be saved to.
-     */
-    private static final String MAVEN_WRAPPER_JAR_PATH =
-            ".mvn/wrapper/maven-wrapper.jar";
-
-    /**
-     * Name of the property which should be used to override the default download url for the wrapper.
-     */
-    private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
-
-    public static void main(String args[]) {
-        System.out.println("- Downloader started");
-        File baseDirectory = new File(args[0]);
-        System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
-
-        // If the maven-wrapper.properties exists, read it and check if it contains a custom
-        // wrapperUrl parameter.
-        File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
-        String url = DEFAULT_DOWNLOAD_URL;
-        if(mavenWrapperPropertyFile.exists()) {
-            FileInputStream mavenWrapperPropertyFileInputStream = null;
-            try {
-                mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
-                Properties mavenWrapperProperties = new Properties();
-                mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
-                url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
-            } catch (IOException e) {
-                System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
-            } finally {
-                try {
-                    if(mavenWrapperPropertyFileInputStream != null) {
-                        mavenWrapperPropertyFileInputStream.close();
-                    }
-                } catch (IOException e) {
-                    // Ignore ...
-                }
-            }
-        }
-        System.out.println("- Downloading from: " + url);
-
-        File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
-        if(!outputFile.getParentFile().exists()) {
-            if(!outputFile.getParentFile().mkdirs()) {
-                System.out.println(
-                        "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
-            }
-        }
-        System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
-        try {
-            downloadFileFromURL(url, outputFile);
-            System.out.println("Done");
-            System.exit(0);
-        } catch (Throwable e) {
-            System.out.println("- Error downloading");
-            e.printStackTrace();
-            System.exit(1);
-        }
-    }
-
-    private static void downloadFileFromURL(String urlString, File destination) throws Exception {
-        if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
-            String username = System.getenv("MVNW_USERNAME");
-            char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
-            Authenticator.setDefault(new Authenticator() {
-                @Override
-                protected PasswordAuthentication getPasswordAuthentication() {
-                    return new PasswordAuthentication(username, password);
-                }
-            });
-        }
-        URL website = new URL(urlString);
-        ReadableByteChannel rbc;
-        rbc = Channels.newChannel(website.openStream());
-        FileOutputStream fos = new FileOutputStream(destination);
-        fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
-        fos.close();
-        rbc.close();
-    }
-
-}
diff --git a/.mvn/wrapper/maven-wrapper.jar b/.mvn/wrapper/maven-wrapper.jar
deleted file mode 100644
index 2cc7d4a55..000000000
Binary files a/.mvn/wrapper/maven-wrapper.jar and /dev/null differ
diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties
index ffdc10e59..1027a01fe 100644
--- a/.mvn/wrapper/maven-wrapper.properties
+++ b/.mvn/wrapper/maven-wrapper.properties
@@ -1,2 +1,2 @@
-distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.1/apache-maven-3.8.1-bin.zip
-wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
+distributionType=only-script
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.13/apache-maven-3.9.13-bin.zip
diff --git a/.yaml-lint.yml b/.yaml-lint.yml
new file mode 100644
index 000000000..a06ffeed5
--- /dev/null
+++ b/.yaml-lint.yml
@@ -0,0 +1,5 @@
+extends: relaxed
+
+rules:
+  line-length:
+    max: 120
diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 000000000..ba4cab7a9
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,179 @@
+# AGENTS.md
+
+This file provides guidance to AI coding agents when working
+with code in this repository.
+
+## Build Commands
+
+This project uses Maven with mise for task automation.
+The Maven wrapper (`./mvnw`) is used for all builds.
+
+```bash
+# Full CI build (clean + install + all checks)
+mise run ci
+
+# Quick compile without tests or checks (fastest)
+mise run compile
+
+# Run unit tests only (skips formatting/coverage/checkstyle)
+mise run test
+
+# Run all tests including integration tests
+mise run test-all
+
+# Format code with Google Java Format
+mise run format
+
+# Run a single test class
+./mvnw test -Dtest=CounterTest \
+  -Dspotless.check.skip=true \
+  -Dcoverage.skip=true -Dcheckstyle.skip=true
+
+# Run a single test method
+./mvnw test -Dtest=CounterTest#testIncrement \
+  -Dspotless.check.skip=true \
+  -Dcoverage.skip=true -Dcheckstyle.skip=true
+
+# Run tests in a specific module
+./mvnw test -pl prometheus-metrics-core \
+  -Dspotless.check.skip=true \
+  -Dcoverage.skip=true -Dcheckstyle.skip=true
+
+# Regenerate protobuf classes (after protobuf dep update)
+mise run generate
+```
+
+## Architecture
+
+The library follows a layered architecture where metrics
+flow from core types through a registry to exporters:
+
+```text
+prometheus-metrics-core (user-facing API)
+         │
+         ▼ collect()
+prometheus-metrics-model (immutable snapshots)
+         │
+         ▼
+prometheus-metrics-exposition-formats
+         │
+         ▼
+Exporters (httpserver, servlet, pushgateway, otel)
+```
+
+### Key Modules
+
+- **prometheus-metrics-core**: User-facing metric types
+  (Counter, Gauge, Histogram, Summary, Info, StateSet).
+  All metrics implement `Collector` with `collect()`.
+- **prometheus-metrics-model**: Internal read-only immutable
+  snapshot types returned by `collect()`.
+  Contains `PrometheusRegistry` for metric registration.
+- **prometheus-metrics-config**: Runtime configuration via
+  properties files or system properties.
+- **prometheus-metrics-exposition-formats**: Converts
+  snapshots to Prometheus exposition formats.
+- **prometheus-metrics-tracer**: Exemplar support with
+  OpenTelemetry tracing integration.
+- **prometheus-metrics-simpleclient-bridge**: Allows legacy
+  simpleclient 0.16.0 metrics to work with the new registry.
+
+### Instrumentation Modules
+
+Pre-built instrumentations:
+`prometheus-metrics-instrumentation-jvm`, `-caffeine`,
+`-guava`, `-dropwizard`, `-dropwizard5`.
+
+## Code Style
+
+- **Formatter**: Google Java Format (enforced via Spotless)
+- **Line length**: 100 characters
+  (enforced for ALL files including Markdown, Java, YAML)
+- **Indentation**: 2 spaces
+- **Static analysis**: `Error Prone` with NullAway
+  (`io.prometheus.metrics` package)
+- **Logger naming**: Logger fields must be named `logger`
+  (not `log`, `LOG`, or `LOGGER`)
+- **Assertions in tests**: Use static imports from AssertJ
+  (`import static ...Assertions.assertThat`)
+- **Empty catch blocks**: Use `ignored` as the variable name
+- **Markdown code blocks**: Always specify language
+  (e.g., ` ```java`, ` ```bash`, ` ```text`)
+
+## Linting and Validation
+
+**CRITICAL**: These checks MUST be run before creating any
+commits. CI will fail if these checks fail.
+
+### Java Files
+
+- **ALWAYS** run `mise run build` after modifying Java files
+  to ensure:
+  - Code formatting (Spotless with Google Java Format)
+  - Static analysis (`Error Prone` with NullAway)
+  - Checkstyle validation
+  - Build succeeds (tests are skipped;
+    run `mise run test` or `mise run test-all` for tests)
+
+### Non-Java Files (Markdown, YAML, JSON, shell scripts)
+
+- **ALWAYS** run `mise run lint` after modifying non-Java
+  files (runs super-linter + link checking + BOM check)
+- `mise run fix` autofixes linting issues
+- Super-linter will **autofix** many issues
+  (formatting, trailing whitespace, etc.)
+- It only reports ERROR-level issues
+  (configured via `LOG_LEVEL=ERROR` in
+  `.github/config/super-linter.env`)
+- Common issues caught:
+  - Lines exceeding 100 characters in Markdown files
+  - Missing language tags in fenced code blocks
+  - Table formatting issues
+  - YAML/JSON syntax errors
+
+### Running Linters
+
+```bash
+# After modifying Java files (run BEFORE committing)
+mise run build
+
+# After modifying non-Java files (run BEFORE committing)
+mise run lint
+# or to autofix: mise run fix
+```
+
+### Before Pushing
+
+**ALWAYS** run `mise run lint` before pushing to verify
+all lints pass. CI runs the same checks and will fail
+if any lint is violated.
+
+## Testing
+
+- JUnit 5 (Jupiter) with `@Test` annotations
+- AssertJ for fluent assertions
+- Mockito for mocking
+- **Test visibility**: Test classes and test methods must be
+  package-protected (no `public` modifier)
+- Integration tests are in `integration-tests/` and run
+  during `verify` phase
+- Acceptance tests use OATs framework:
+  `mise run acceptance-test`
+
+## Documentation
+
+- Docs live under `docs/content/` and use `$version` as a
+  placeholder for the library version
+- When publishing GitHub Pages,
+  `mise run set-release-version-github-pages` replaces
+  `$version` with the latest Git tag across all
+  `docs/content/**/*.md` files
+  (the published site is not versioned)
+- Use `$version` for the Prometheus client version and
+  `$otelVersion-alpha` for the OTel instrumentation
+  version — never hardcode them
+
+## Java Version
+
+Source compatibility: Java 8. Tests run on Java 25
+(configured in `mise.toml`).
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 000000000..6b5e23414
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,3 @@
+
+
+@AGENTS.md
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3bbe74bc2..1e478f0dc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,11 +2,121 @@
 
 Prometheus uses GitHub to manage reviews of pull requests.
 
-* If you have a trivial fix or improvement, go ahead and create a pull request,
+- If you have a trivial fix or improvement, go ahead and create a pull request,
   addressing (with `@...`) the maintainer of this repository (see
-  [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
+  [MAINTAINERS.md](MAINTAINERS.md)) in the
+  description of the pull request.
 
-* If you plan to do something more involved, first discuss your ideas
+- If you plan to do something more involved, first discuss your ideas
   on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
   This will avoid unnecessary work and surely give you and us a good deal
   of inspiration.
+
+## Signing Off Commits
+
+Every commit must include a `Signed-off-by` line, as required by the
+[Developer Certificate of Origin (DCO)](https://developercertificate.org/).
+
+Sign off each commit by passing `--signoff` (or `-s`) to `git commit`:
+
+```bash
+git commit --signoff -m "Your commit message"
+```
+
+To sign off only the most recent commit, use `--amend`:
+
+```bash
+git commit --amend --signoff --no-edit
+```
+
+To sign off multiple commits, rebase (replace `N` with the number of commits):
+
+```bash
+git rebase --signoff HEAD~N
+```
+
+Then force-push the branch:
+
+```bash
+git push --force-with-lease
+```
+
+## Formatting
+
+This repository uses [Google Java Format](https://github.com/google/google-java-format) to format
+the code.
+
+Run `./mvnw spotless:apply` to format the code (only changed files) before committing.
+
+Or run all the linters:
+
+`mise run lint`
+
+To autofix linting issues:
+
+`mise run fix`
+
+## Running Tests
+
+If you're getting errors when running tests:
+
+- Make sure that the IDE uses only the "Maven Shade" dependency of
+  "prometheus-metrics-exposition-formats" and the "prometheus-metrics-tracer\*" dependencies.
+
+### Running native tests
+
+```shell
+mise --cd .mise/envs/native run native-test
+```
+
+### Avoid failures while running tests
+
+- Use `-Dspotless.check.skip=true` to skip the formatting check during development.
+- Use `-Dcoverage.skip=true` to skip the coverage check during development.
+- Use `-Dcheckstyle.skip=true` to skip the checkstyle check during development.
+- Use `-Dwarnings=-nowarn` to skip the warnings during development.
+
+Combine all with
+
+```shell
+./mvnw install -DskipTests -Dspotless.check.skip=true -Dcoverage.skip=true \
+  -Dcheckstyle.skip=true -Dwarnings=-nowarn
+```
+
+or simply
+
+```shell
+mise run compile
+```
+
+## Version Numbers in Examples
+
+Example `pom.xml` files (under `examples/`) should reference the latest
+**released** version, not a SNAPSHOT. After each release, Renovate
+updates these versions automatically.
+
+Only use a SNAPSHOT version in an example when it demonstrates a new
+feature that has not been released yet.
+
+## Updating the Protobuf Java Classes
+
+The generated protobuf `Metrics.java` lives in a versioned package
+(e.g., `...generated.com_google_protobuf_4_33_5`) that changes with each
+protobuf release. A stable extending class at
+`...generated/Metrics.java` reexports all types so that consumer code
+only imports from the version-free package. On protobuf upgrades only
+the `extends` clause in the stable class changes.
+
+In the failing PR from renovate, run:
+
+```shell
+mise run generate
+```
+
+The script will:
+
+1. Re-generate the protobuf sources with the new version.
+2. Update the versioned package name in all Java files
+   (including the stable `Metrics.java` extends clause).
+
+Add the updated files to Git and commit them.
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index c79384366..cf0a885eb 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -1,3 +1,6 @@
-* Fabian Stäber  @fstab
-* Doug Hoard  @dhoard
-* Tom Wilkie  @tomwilkie
+# Maintainers
+
+- Fabian Stäber  @fstab
+- Doug Hoard  @dhoard
+- Tom Wilkie  @tomwilkie
+- Gregor Zeitlinger  @zeitlinger
diff --git a/MAINTAINER_NOTES.md b/MAINTAINER_NOTES.md
deleted file mode 100644
index 90f030c23..000000000
--- a/MAINTAINER_NOTES.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Maintainer Notes
-
-## Update Dependency Versions
-
-Use the [Versions Maven Plugin](https://www.mojohaus.org/versions-maven-plugin/index.html). Rules are configured in [version-rules.xml](version-rules.xml).
-
-```
-./mvnw versions:use-latest-releases
-```
-
-## Release
-
-```
-./mvnw release:prepare -DreleaseVersion=1.2.0 -DdevelopmentVersion=1.3.0-SNAPSHOT
-./mvnw release:perform -DreleaseVersion=1.2.0 -DdevelopmentVersion=1.3.0-SNAPSHOT
-```
-
-`release:prepare` does Github tags and commits, while `release:perform` signs the artifacts and uploads them to the staging repositoring on [https://oss.sonatype.org](https://oss.sonatype.org).
-
-After that, manually verify the uploaded artifacts on [https://oss.sonatype.org/#stagingRepositories](https://oss.sonatype.org/#stagingRepositories), click `Close` to trigger Sonatype's verification, and then `Release`.
-
-Note: We release only the parent module and the modules starting with simpleclient. Currently, we manually remove the benchmark and integration test modules. Todo: Instead of manually removing these modules, we should reconfigure the build to make sure that these modules aren't released.
diff --git a/README.md b/README.md
index 0cf4f3b80..a6b611db2 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,26 @@
 # Prometheus Java Metrics Library
 
-[![Build Status](https://circleci.com/gh/prometheus/client_java.svg?style=svg)](https://circleci.com/gh/prometheus/client_java)
+[![Build](https://github.com/prometheus/client_java/actions/workflows/build.yml/badge.svg)](https://github.com/prometheus/client_java/actions/workflows/build.yml) java 8+ Apache 2.0 # editorconfig-checker-disable-line
 
-# Documentation
+## Documentation
 
 [https://prometheus.github.io/client_java](https://prometheus.github.io/client_java)
 
-# Contributing and community
+## Contributing and community
 
-See [CONTRIBUTING.md](CONTRIBUTING.md) and the [community section](http://prometheus.io/community/) of the Prometheus homepage.
+See [CONTRIBUTING.md](CONTRIBUTING.md) and
+the [community section](http://prometheus.io/community/)
+of the Prometheus homepage.
 
-The Prometheus Java community is present on the [CNCF Slack](https://cloud-native.slack.com) on `#prometheus-java`, and we have a fortnightly community call in the [Prometheus public calendar](https://prometheus.io/community/).
+The Prometheus Java community is present on the [CNCF Slack](https://cloud-native.slack.com) on
+`#prometheus-java`, and we have a fortnightly community call in
+the [Prometheus public calendar](https://prometheus.io/community/).
 
-# Previous Releases
+## Previous Releases
 
-The source code for 0.16.0 and older is on the [simpleclient](https://github.com/prometheus/client_java/tree/simpleclient) branch.
+The source code for 0.16.0 and older is on
+the [simpleclient](https://github.com/prometheus/client_java/tree/simpleclient) branch.
 
-# License
+## License
 
 Apache License 2.0, see [LICENSE](LICENSE).
diff --git a/RELEASING.md b/RELEASING.md
new file mode 100644
index 000000000..25f2d59a5
--- /dev/null
+++ b/RELEASING.md
@@ -0,0 +1,58 @@
+# Releasing Instructions for Prometheus Java Client
+
+Releases are automated via
+[release-please](https://github.com/googleapis/release-please).
+
+## How It Works
+
+1. Commits to `main` using
+   [Conventional Commits](https://www.conventionalcommits.org/) are
+   tracked by release-please.
+2. Release-please maintains a release PR that accumulates changes and
+   updates the changelog.
+3. When the release PR is merged, release-please creates a GitHub
+   release and a `vX.Y.Z` tag.
+4. The tag triggers the existing `release.yml` workflow, which deploys
+   to Maven Central.
+5. After tagging, release-please opens a follow-up PR to bump the
+   SNAPSHOT version in all `pom.xml` files.
+
+## Patch Release (default)
+
+Simply merge the release PR — release-please bumps the patch version
+by default (e.g. `1.5.0` -> `1.5.1`).
+
+## Minor or Major Release
+
+Add a `release-as: X.Y.0` footer to any commit on `main`:
+
+```text
+feat: add new feature
+
+release-as: 1.6.0
+```
+
+Alternatively, edit the release PR title to
+`chore(main): release 1.6.0`.
+
+## Before the Release
+
+If there have been significant changes since the last release, update
+the benchmarks before merging the release PR:
+
+```shell
+mise run update-benchmarks
+```
+
+## If the GPG Key Expired
+
+1. Generate a new key:
+
+2. Distribute the key:
+
+3. Use `gpg --armor --export-secret-keys YOUR_ID` to export
+   ([docs](https://github.com/actions/setup-java/blob/main/docs/advanced-usage.md#gpg))
+4. Update the passphrase:
+
+5. Update the GPG key:
+
diff --git a/benchmarks/README.md b/benchmarks/README.md
index 36300e4d1..b4c824d85 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -1,32 +1,87 @@
-Benchmarks
-----------
+# Benchmarks
 
 ## How to Run
 
+### Running benchmarks
+
+Run benchmarks and update the results in the Javadoc of the benchmark classes:
+
+```shell
+mise run update-benchmarks
 ```
+
+### Different benchmark configurations
+
+The full benchmark suite takes approximately 2 hours with JMH defaults.
+For faster iterations, use these preset configurations:
+
+| Command                       | Duration | Use Case                                 |
+| ----------------------------- | -------- | ---------------------------------------- |
+| `mise run benchmark:quick`    | ~10 min  | Quick smoke test during development      |
+| `mise run benchmark:standard` | ~60 min  | CI/nightly runs with good accuracy       |
+| `mise run benchmark:full`     | ~2 hours | Full JMH defaults for release validation |
+
+### Running benchmarks manually
+
+```shell
 java -jar ./benchmarks/target/benchmarks.jar
 ```
 
 Run only one specific benchmark:
 
-```
+```shell
 java -jar ./benchmarks/target/benchmarks.jar CounterBenchmark
 ```
 
+### Custom JMH arguments
+
+You can pass custom JMH arguments:
+
+```shell
+# Quick run: 1 fork, 1 warmup iteration, 3 measurement iterations
+mise run update-benchmarks -- --jmh-args "-f 1 -wi 1 -i 3"
+
+# Standard CI: 3 forks, 3 warmup iterations, 5 measurement iterations
+mise run update-benchmarks -- --jmh-args "-f 3 -wi 3 -i 5"
+```
+
+JMH parameter reference:
+
+- `-f N`: Number of forks (JVM restarts)
+- `-wi N`: Number of warmup iterations
+- `-i N`: Number of measurement iterations
+- `-w Ns`: Warmup iteration time (default: 10s)
+- `-r Ns`: Measurement iteration time (default: 10s)
+
 ## Results
 
 See Javadoc of the benchmark classes:
 
-* [CounterBenchmark](https://github.com/prometheus/client_java/blob/1.0.x/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/CounterBenchmark.java)
-* [HistogramBenchmark](https://github.com/prometheus/client_java/blob/1.0.x/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/HistogramBenchmark.java)
+- [CounterBenchmark](https://github.com/prometheus/client_java/blob/main/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/CounterBenchmark.java)
+- [HistogramBenchmark](https://github.com/prometheus/client_java/blob/main/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/HistogramBenchmark.java)
+- [TextFormatUtilBenchmark](https://github.com/prometheus/client_java/blob/main/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/TextFormatUtilBenchmark.java)
 
 ## What Prometheus Java client optimizes for
 
 concurrent updates of metrics in multi-threaded applications.
-If your application is single-threaded and uses only one processor core, your application isn't performance critical anyway.
-If your application is designed to use all available processor cores for maximum performance, then you want a metric library that doesn't slow your application down.
-Prometheus client Java metrics support concurrent updates and scrapes. This shows in benchmarks with multiple threads recording data in shared metrics.
+If your application is single-threaded and uses only one processor core, your application isn't
+performance critical anyway.
+If your application is designed to use all available processor cores for maximum performance, then
+you want a metric library that doesn't slow your
+application down.
+Prometheus client Java metrics support concurrent updates and scrapes. This shows in benchmarks with
+multiple threads recording data in shared
+metrics.
+
+## Test the benchmark creation script
+
+To test the benchmark creation script, run:
+
+```shell
+python ./.mise/tasks/test_update-benchmarks.py
+```
 
 ## Archive
 
-The `src/main/archive/` directory contains the old benchmarks from 0.16.0 and earlier. It will be removed as soon as all benchmarks are ported to the 1.0.0 release.
+The `src/main/archive/` directory contains the old benchmarks from 0.16.0 and earlier. It will be
+removed as soon as all benchmarks are ported to the 1.0.0 release.
diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml
index b57d082df..7c211006b 100644
--- a/benchmarks/pom.xml
+++ b/benchmarks/pom.xml
@@ -1,107 +1,126 @@
 
-
-    4.0.0
+
+  4.0.0
 
-    
-        io.prometheus
-        client_java
-        1.2.0-SNAPSHOT
-    
+  
+    io.prometheus
+    client_java
+    1.6.0-SNAPSHOT
+  
 
-    benchmarks
+  benchmarks
 
-    Prometheus Java Client Benchmarks
-    
-        Benchmarks of client performance, and comparison to other systems.
-    
+  Prometheus Java Client Benchmarks
+  
+    Benchmarks of client performance, and comparison to other systems.
+  
 
-    
-        1.37
-        0.16.0
-        3.0.2
-        1.30.1
-    
-
-    
-        
-            The Apache Software License, Version 2.0
-            http://www.apache.org/licenses/LICENSE-2.0.txt
-            repo
-        
-    
+  
+    1.37
+    0.16.0
+    3.0.2
+    true
+    true
+  
 
+  
     
-        
-            org.openjdk.jmh
-            jmh-core
-            ${jmh.version}
-        
-        
-            org.openjdk.jmh
-            jmh-generator-annprocess
-            ${jmh.version}
-        
-        
-        
-            io.prometheus
-            prometheus-metrics-core
-            ${project.version}
-        
-        
-            io.prometheus
-            simpleclient
-            ${simpleclient.version}
-        
-        
-          com.codahale.metrics
-          metrics-core
-          ${codahale.version}
-        
-        
-            io.opentelemetry
-            opentelemetry-api
-            ${opentelemetry.version}
-        
-        
-            io.opentelemetry
-            opentelemetry-sdk
-            ${opentelemetry.version}
-        
-        
-            io.opentelemetry
-            opentelemetry-sdk-testing
-            ${opentelemetry.version}
-        
+      
+        io.opentelemetry.instrumentation
+        opentelemetry-instrumentation-bom-alpha
+        ${otel.instrumentation.version}
+        pom
+        import
+      
     
-    
-        ${project.artifactId}
-        
-            
-                org.apache.maven.plugins
-                maven-shade-plugin
-                
-                    
-                        package
-                        
-                            shade
-                        
-                        
-                            benchmarks
-                            
-                                
-                                    io.prometheus.metrics.benchmarks.BenchmarkRunner
-                                
-                            
-                        
-                    
-                
-            
-        
-    
+  
+
+  
+    
+      org.openjdk.jmh
+      jmh-core
+      ${jmh.version}
+    
+    
+      io.prometheus
+      prometheus-metrics-core
+      ${project.version}
+    
+    
+      io.prometheus
+      prometheus-metrics-exposition-textformats
+      ${project.version}
+    
+    
+      io.prometheus
+      simpleclient
+      ${simpleclient.version}
+    
+    
+      com.codahale.metrics
+      metrics-core
+      ${codahale.version}
+    
+    
+      io.opentelemetry
+      opentelemetry-api
+    
+    
+      io.opentelemetry
+      opentelemetry-sdk
+    
+    
+      io.opentelemetry
+      opentelemetry-sdk-testing
+    
+  
+  
+    ${project.artifactId}
+    
+      
+        org.apache.maven.plugins
+        maven-compiler-plugin
+        
+          1.8
+          1.8
+          
+            
+            -parameters
+          
+          
+            
+              org.openjdk.jmh
+              jmh-generator-annprocess
+              ${jmh.version}
+            
+          
+        
+      
+      
+        org.apache.maven.plugins
+        maven-shade-plugin
+        
+          
+            package
+            
+              shade
+            
+            
+              benchmarks
+              
+                
+                  io.prometheus.metrics.benchmarks.BenchmarkRunner
+                  
+                
+              
+            
+          
+        
+      
+    
+  
 
diff --git a/benchmarks/src/archive/java/io/prometheus/client/CKMSQuantileBenchmark.java b/benchmarks/src/archive/java/io/prometheus/client/CKMSQuantileBenchmark.java
index 530810481..ab383d327 100644
--- a/benchmarks/src/archive/java/io/prometheus/client/CKMSQuantileBenchmark.java
+++ b/benchmarks/src/archive/java/io/prometheus/client/CKMSQuantileBenchmark.java
@@ -1,6 +1,11 @@
 package io.prometheus.client;
 
 import io.prometheus.client.CKMSQuantiles.Quantile;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
 import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.infra.Blackhole;
 import org.openjdk.jmh.runner.Runner;
@@ -8,131 +13,120 @@
 import org.openjdk.jmh.runner.options.Options;
 import org.openjdk.jmh.runner.options.OptionsBuilder;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
 public class CKMSQuantileBenchmark {
 
-    @State(Scope.Benchmark)
-    public static class EmptyBenchmarkState {
-        @Param({"10000", "100000", "1000000"})
-        public int value;
-
-        List quantiles;
-        Random rand = new Random(0);
-
-        List shuffle;
-
-        Quantile mean = new Quantile(0.50, 0.050);
-        Quantile q90 = new Quantile(0.90, 0.010);
-        Quantile q95 = new Quantile(0.95, 0.005);
-        Quantile q99 = new Quantile(0.99, 0.001);
-
-        @Setup(Level.Trial)
-        public void setup() {
-            quantiles = new ArrayList();
-            quantiles.add(mean);
-            quantiles.add(q90);
-            quantiles.add(q95);
-            quantiles.add(q99);
-
-            shuffle = new ArrayList(value);
-            for (int i = 0; i < value; i++) {
-                shuffle.add((double) i);
-            }
-            Collections.shuffle(shuffle, rand);
-        }
-    }
-
-    @Benchmark
-    @BenchmarkMode({Mode.AverageTime})
-    @OutputTimeUnit(TimeUnit.MILLISECONDS)
-    public void ckmsQuantileInsertBenchmark(EmptyBenchmarkState state) {
-        CKMSQuantiles q = new CKMSQuantiles(state.quantiles.toArray(new Quantile[]{}));
-        for (Double l : state.shuffle) {
-            q.insert(l);
-        }
-    }
-
-    /** prefilled benchmark, means that we already have a filled and compressed samples available */
-    @State(Scope.Benchmark)
-    public static class PrefilledBenchmarkState {
-        @Param({"10000", "100000", "1000000"})
-        public int value;
-
-
-        CKMSQuantiles ckmsQuantiles;
-
-        List quantiles;
-        Random rand = new Random(0);
-
-        Quantile mean = new Quantile(0.50, 0.050);
-        Quantile q90 = new Quantile(0.90, 0.010);
-        Quantile q95 = new Quantile(0.95, 0.005);
-        Quantile q99 = new Quantile(0.99, 0.001);
-        List shuffle;
-
-        int rank = (int) (value * q95.quantile);
-
-
-        @Setup(Level.Trial)
-        public void setup() {
-            quantiles = new ArrayList();
-            quantiles.add(mean);
-            quantiles.add(q90);
-            quantiles.add(q95);
-            quantiles.add(q99);
-
-            shuffle = new ArrayList(value);
-            for (int i = 0; i < value; i++) {
-                shuffle.add((double) i);
-            }
-            Collections.shuffle(shuffle, rand);
-
-
-            ckmsQuantiles = new CKMSQuantiles(quantiles.toArray(new Quantile[]{}));
-            for (Double l : shuffle) {
-                ckmsQuantiles.insert(l);
-            }
-            // make sure we inserted all 'hanging' samples (count % 128)
-            ckmsQuantiles.get(0);
-            // compress everything so we have a similar samples size regardless of n.
-            ckmsQuantiles.compress();
-            System.out.println("Sample size is: " + ckmsQuantiles.samples.size());
-        }
-
-    }
-
-    @Benchmark
-    @BenchmarkMode({Mode.AverageTime})
-    @OutputTimeUnit(TimeUnit.NANOSECONDS)
-    public void ckmsQuantileGetBenchmark(Blackhole blackhole, PrefilledBenchmarkState state) {
-        blackhole.consume(state.ckmsQuantiles.get(state.q90.quantile));
+  @State(Scope.Benchmark)
+  public static class EmptyBenchmarkState {
+    @Param({"10000", "100000", "1000000"})
+    public int value;
+
+    List quantiles;
+    Random rand = new Random(0);
+
+    List shuffle;
+
+    Quantile mean = new Quantile(0.50, 0.050);
+    Quantile q90 = new Quantile(0.90, 0.010);
+    Quantile q95 = new Quantile(0.95, 0.005);
+    Quantile q99 = new Quantile(0.99, 0.001);
+
+    @Setup(Level.Trial)
+    public void setup() {
+      quantiles = new ArrayList();
+      quantiles.add(mean);
+      quantiles.add(q90);
+      quantiles.add(q95);
+      quantiles.add(q99);
+
+      shuffle = new ArrayList(value);
+      for (int i = 0; i < value; i++) {
+        shuffle.add((double) i);
+      }
+      Collections.shuffle(shuffle, rand);
     }
-
-    /**
-     * benchmark for the f method.
-     */
-    @Benchmark
-    @BenchmarkMode({Mode.AverageTime})
-    @OutputTimeUnit(TimeUnit.NANOSECONDS)
-    public void ckmsQuantileF(Blackhole blackhole, PrefilledBenchmarkState state) {
-        blackhole.consume(state.ckmsQuantiles.f(state.rank));
+  }
+
+  @Benchmark
+  @BenchmarkMode({Mode.AverageTime})
+  @OutputTimeUnit(TimeUnit.MILLISECONDS)
+  public void ckmsQuantileInsertBenchmark(EmptyBenchmarkState state) {
+    CKMSQuantiles q = new CKMSQuantiles(state.quantiles.toArray(new Quantile[] {}));
+    for (Double l : state.shuffle) {
+      q.insert(l);
     }
-
-    public static void main(String[] args) throws RunnerException {
-
-        Options opt = new OptionsBuilder()
-                .include(CKMSQuantileBenchmark.class.getSimpleName())
-                .warmupIterations(5)
-                .measurementIterations(4)
-                .threads(1)
-                .forks(1)
-                .build();
-
-        new Runner(opt).run();
+  }
+
+  /** prefilled benchmark, means that we already have a filled and compressed samples available */
+  @State(Scope.Benchmark)
+  public static class PrefilledBenchmarkState {
+    @Param({"10000", "100000", "1000000"})
+    public int value;
+
+    CKMSQuantiles ckmsQuantiles;
+
+    List quantiles;
+    Random rand = new Random(0);
+
+    Quantile mean = new Quantile(0.50, 0.050);
+    Quantile q90 = new Quantile(0.90, 0.010);
+    Quantile q95 = new Quantile(0.95, 0.005);
+    Quantile q99 = new Quantile(0.99, 0.001);
+    List shuffle;
+
+    int rank = (int) (value * q95.quantile);
+
+    @Setup(Level.Trial)
+    public void setup() {
+      quantiles = new ArrayList();
+      quantiles.add(mean);
+      quantiles.add(q90);
+      quantiles.add(q95);
+      quantiles.add(q99);
+
+      shuffle = new ArrayList(value);
+      for (int i = 0; i < value; i++) {
+        shuffle.add((double) i);
+      }
+      Collections.shuffle(shuffle, rand);
+
+      ckmsQuantiles = new CKMSQuantiles(quantiles.toArray(new Quantile[] {}));
+      for (Double l : shuffle) {
+        ckmsQuantiles.insert(l);
+      }
+      // make sure we inserted all 'hanging' samples (count % 128)
+      ckmsQuantiles.get(0);
+      // compress everything so we have a similar samples size regardless of n.
+      ckmsQuantiles.compress();
+      System.out.println("Sample size is: " + ckmsQuantiles.samples.size());
     }
+  }
+
+  @Benchmark
+  @BenchmarkMode({Mode.AverageTime})
+  @OutputTimeUnit(TimeUnit.NANOSECONDS)
+  public void ckmsQuantileGetBenchmark(Blackhole blackhole, PrefilledBenchmarkState state) {
+    blackhole.consume(state.ckmsQuantiles.get(state.q90.quantile));
+  }
+
+  /** benchmark for the f method. */
+  @Benchmark
+  @BenchmarkMode({Mode.AverageTime})
+  @OutputTimeUnit(TimeUnit.NANOSECONDS)
+  public void ckmsQuantileF(Blackhole blackhole, PrefilledBenchmarkState state) {
+    blackhole.consume(state.ckmsQuantiles.f(state.rank));
+  }
+
+  public static void main(String[] args) throws RunnerException {
+
+    Options opt =
+        new OptionsBuilder()
+            .include(CKMSQuantileBenchmark.class.getSimpleName())
+            .warmupIterations(5)
+            .measurementIterations(4)
+            .threads(1)
+            .forks(1)
+            .build();
+
+    new Runner(opt).run();
+  }
 }
diff --git a/benchmarks/src/archive/java/io/prometheus/client/benchmark/CounterBenchmark.java b/benchmarks/src/archive/java/io/prometheus/client/benchmark/CounterBenchmark.java
index 592efc6f2..c37076156 100644
--- a/benchmarks/src/archive/java/io/prometheus/client/benchmark/CounterBenchmark.java
+++ b/benchmarks/src/archive/java/io/prometheus/client/benchmark/CounterBenchmark.java
@@ -1,6 +1,7 @@
 package io.prometheus.client.benchmark;
 
 import com.codahale.metrics.MetricRegistry;
+import java.util.concurrent.TimeUnit;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Mode;
@@ -13,8 +14,6 @@
 import org.openjdk.jmh.runner.options.Options;
 import org.openjdk.jmh.runner.options.OptionsBuilder;
 
-import java.util.concurrent.TimeUnit;
-
 @State(Scope.Benchmark)
 public class CounterBenchmark {
 
@@ -28,16 +27,16 @@ public class CounterBenchmark {
 
   @Setup
   public void setup() {
-    prometheusSimpleCounter = io.prometheus.client.Counter.build()
-      .name("name")
-      .help("some description..")
-      .labelNames("some", "group").create();
+    prometheusSimpleCounter =
+        io.prometheus.client.Counter.build()
+            .name("name")
+            .help("some description..")
+            .labelNames("some", "group")
+            .create();
     prometheusSimpleCounterChild = prometheusSimpleCounter.labels("test", "group");
 
-    prometheusSimpleCounterNoLabels = io.prometheus.client.Counter.build()
-      .name("name")
-      .help("some description..")
-      .create();
+    prometheusSimpleCounterNoLabels =
+        io.prometheus.client.Counter.build().name("name").help("some description..").create();
 
     registry = new MetricRegistry();
     codahaleCounter = registry.counter("counter");
@@ -48,21 +47,21 @@ public void setup() {
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleCounterIncBenchmark() {
-    prometheusSimpleCounter.labels("test", "group").inc(); 
+    prometheusSimpleCounter.labels("test", "group").inc();
   }
-  
+
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleCounterChildIncBenchmark() {
-    prometheusSimpleCounterChild.inc(); 
+    prometheusSimpleCounterChild.inc();
   }
 
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleCounterNoLabelsIncBenchmark() {
-    prometheusSimpleCounterNoLabels.inc(); 
+    prometheusSimpleCounterNoLabels.inc();
   }
 
   @Benchmark
@@ -81,13 +80,14 @@ public void codahaleMeterMarkBenchmark() {
 
   public static void main(String[] args) throws RunnerException {
 
-    Options opt = new OptionsBuilder()
-      .include(CounterBenchmark.class.getSimpleName())
-      .warmupIterations(5)
-      .measurementIterations(4)
-      .threads(4)
-      .forks(1)
-      .build();
+    Options opt =
+        new OptionsBuilder()
+            .include(CounterBenchmark.class.getSimpleName())
+            .warmupIterations(5)
+            .measurementIterations(4)
+            .threads(4)
+            .forks(1)
+            .build();
 
     new Runner(opt).run();
   }
diff --git a/benchmarks/src/archive/java/io/prometheus/client/benchmark/ExemplarsBenchmark.java b/benchmarks/src/archive/java/io/prometheus/client/benchmark/ExemplarsBenchmark.java
index 7d033afcd..a3fa45ae1 100644
--- a/benchmarks/src/archive/java/io/prometheus/client/benchmark/ExemplarsBenchmark.java
+++ b/benchmarks/src/archive/java/io/prometheus/client/benchmark/ExemplarsBenchmark.java
@@ -3,6 +3,7 @@
 import io.prometheus.client.Counter;
 import io.prometheus.client.exemplars.DefaultExemplarSampler;
 import io.prometheus.client.exemplars.tracer.common.SpanContextSupplier;
+import java.util.concurrent.TimeUnit;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Mode;
@@ -11,8 +12,6 @@
 import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
 
-import java.util.concurrent.TimeUnit;
-
 @State(Scope.Benchmark)
 public class ExemplarsBenchmark {
 
@@ -23,25 +22,28 @@ public class ExemplarsBenchmark {
   @Setup
   public void setup() {
 
-    counter = Counter.build()
-        .name("counter_total")
-        .help("Total number of requests.")
-        .labelNames("path")
-        .create();
+    counter =
+        Counter.build()
+            .name("counter_total")
+            .help("Total number of requests.")
+            .labelNames("path")
+            .create();
 
-    counterWithExemplars = Counter.build()
-        .name("counter_with_exemplars_total")
-        .help("Total number of requests.")
-        .labelNames("path")
-        .withExemplarSampler(new DefaultExemplarSampler(new MockSpanContextSupplier()))
-        .create();
+    counterWithExemplars =
+        Counter.build()
+            .name("counter_with_exemplars_total")
+            .help("Total number of requests.")
+            .labelNames("path")
+            .withExemplarSampler(new DefaultExemplarSampler(new MockSpanContextSupplier()))
+            .create();
 
-    counterWithoutExemplars = Counter.build()
-        .name("counter_without_exemplars_total")
-        .help("Total number of requests.")
-        .labelNames("path")
-        .withoutExemplars()
-        .create();
+    counterWithoutExemplars =
+        Counter.build()
+            .name("counter_without_exemplars_total")
+            .help("Total number of requests.")
+            .labelNames("path")
+            .withoutExemplars()
+            .create();
   }
 
   @Benchmark
@@ -79,7 +81,7 @@ public String getSpanId() {
 
     @Override
     public boolean isSampled() {
-        return true;
+      return true;
     }
   }
 }
diff --git a/benchmarks/src/archive/java/io/prometheus/client/benchmark/GaugeBenchmark.java b/benchmarks/src/archive/java/io/prometheus/client/benchmark/GaugeBenchmark.java
index d088d280d..a8eb03c83 100644
--- a/benchmarks/src/archive/java/io/prometheus/client/benchmark/GaugeBenchmark.java
+++ b/benchmarks/src/archive/java/io/prometheus/client/benchmark/GaugeBenchmark.java
@@ -1,6 +1,7 @@
 package io.prometheus.client.benchmark;
 
 import com.codahale.metrics.MetricRegistry;
+import java.util.concurrent.TimeUnit;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Mode;
@@ -13,8 +14,6 @@
 import org.openjdk.jmh.runner.options.Options;
 import org.openjdk.jmh.runner.options.OptionsBuilder;
 
-import java.util.concurrent.TimeUnit;
-
 @State(Scope.Benchmark)
 public class GaugeBenchmark {
 
@@ -27,16 +26,16 @@ public class GaugeBenchmark {
 
   @Setup
   public void setup() {
-    prometheusSimpleGauge = io.prometheus.client.Gauge.build()
-      .name("name")
-      .help("some description..")
-      .labelNames("some", "group").create();
+    prometheusSimpleGauge =
+        io.prometheus.client.Gauge.build()
+            .name("name")
+            .help("some description..")
+            .labelNames("some", "group")
+            .create();
     prometheusSimpleGaugeChild = prometheusSimpleGauge.labels("test", "group");
 
-    prometheusSimpleGaugeNoLabels = io.prometheus.client.Gauge.build()
-      .name("name")
-      .help("some description..")
-      .create();
+    prometheusSimpleGaugeNoLabels =
+        io.prometheus.client.Gauge.build().name("name").help("some description..").create();
 
     registry = new MetricRegistry();
     codahaleCounter = registry.counter("name");
@@ -47,21 +46,21 @@ public void setup() {
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleGaugeIncBenchmark() {
-    prometheusSimpleGauge.labels("test", "group").inc(); 
+    prometheusSimpleGauge.labels("test", "group").inc();
   }
-  
+
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleGaugeChildIncBenchmark() {
-    prometheusSimpleGaugeChild.inc(); 
+    prometheusSimpleGaugeChild.inc();
   }
 
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleGaugeNoLabelsIncBenchmark() {
-    prometheusSimpleGaugeNoLabels.inc(); 
+    prometheusSimpleGaugeNoLabels.inc();
   }
 
   @Benchmark
@@ -71,27 +70,26 @@ public void codahaleCounterIncBenchmark() {
     codahaleCounter.inc();
   }
 
-
   // Decrement.
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleGaugeDecBenchmark() {
-    prometheusSimpleGauge.labels("test", "group").dec(); 
+    prometheusSimpleGauge.labels("test", "group").dec();
   }
-  
+
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleGaugeChildDecBenchmark() {
-    prometheusSimpleGaugeChild.dec(); 
+    prometheusSimpleGaugeChild.dec();
   }
 
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleGaugeNoLabelsDecBenchmark() {
-    prometheusSimpleGaugeNoLabels.dec(); 
+    prometheusSimpleGaugeNoLabels.dec();
   }
 
   @Benchmark
@@ -106,9 +104,9 @@ public void codahaleCounterDecBenchmark() {
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleGaugeSetBenchmark() {
-    prometheusSimpleGauge.labels("test", "group").set(42); 
+    prometheusSimpleGauge.labels("test", "group").set(42);
   }
-  
+
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
@@ -120,18 +118,19 @@ public void prometheusSimpleGaugeChildSetBenchmark() {
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleGaugeNoLabelsSetBenchmark() {
-    prometheusSimpleGaugeNoLabels.set(42); 
+    prometheusSimpleGaugeNoLabels.set(42);
   }
 
   public static void main(String[] args) throws RunnerException {
 
-    Options opt = new OptionsBuilder()
-      .include(GaugeBenchmark.class.getSimpleName())
-      .warmupIterations(5)
-      .measurementIterations(4)
-      .threads(4)
-      .forks(1)
-      .build();
+    Options opt =
+        new OptionsBuilder()
+            .include(GaugeBenchmark.class.getSimpleName())
+            .warmupIterations(5)
+            .measurementIterations(4)
+            .threads(4)
+            .forks(1)
+            .build();
 
     new Runner(opt).run();
   }
diff --git a/benchmarks/src/archive/java/io/prometheus/client/benchmark/SanitizeMetricNameBenchmark.java b/benchmarks/src/archive/java/io/prometheus/client/benchmark/SanitizeMetricNameBenchmark.java
index d89e676e5..cb5f300ce 100644
--- a/benchmarks/src/archive/java/io/prometheus/client/benchmark/SanitizeMetricNameBenchmark.java
+++ b/benchmarks/src/archive/java/io/prometheus/client/benchmark/SanitizeMetricNameBenchmark.java
@@ -1,12 +1,12 @@
 package io.prometheus.client.benchmark;
 
-import com.codahale.metrics.MetricRegistry;
+import io.prometheus.client.Collector;
+import java.util.concurrent.TimeUnit;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Mode;
 import org.openjdk.jmh.annotations.OutputTimeUnit;
 import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.runner.Runner;
 import org.openjdk.jmh.runner.RunnerException;
@@ -14,23 +14,18 @@
 import org.openjdk.jmh.runner.options.OptionsBuilder;
 import org.openjdk.jmh.runner.options.TimeValue;
 
-import io.prometheus.client.Collector;
-
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
 @State(Scope.Benchmark)
 public class SanitizeMetricNameBenchmark {
 
   @Benchmark
-  @BenchmarkMode({ Mode.AverageTime })
+  @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void sanitizeSanitizedName() {
     Collector.sanitizeMetricName("good_name");
   }
 
   @Benchmark
-  @BenchmarkMode({ Mode.AverageTime })
+  @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void sanitizeNonSanitizedName() {
     Collector.sanitizeMetricName("9not_good_name!");
@@ -38,15 +33,16 @@ public void sanitizeNonSanitizedName() {
 
   public static void main(String[] args) throws RunnerException {
 
-    Options opt = new OptionsBuilder()
-        .include(SanitizeMetricNameBenchmark.class.getSimpleName())
-        .warmupIterations(5)
-        .measurementIterations(4)
-        .measurementTime(TimeValue.seconds(1))
-        .warmupTime(TimeValue.seconds(1))
-        .threads(4)
-        .forks(1)
-        .build();
+    Options opt =
+        new OptionsBuilder()
+            .include(SanitizeMetricNameBenchmark.class.getSimpleName())
+            .warmupIterations(5)
+            .measurementIterations(4)
+            .measurementTime(TimeValue.seconds(1))
+            .warmupTime(TimeValue.seconds(1))
+            .threads(4)
+            .forks(1)
+            .build();
 
     new Runner(opt).run();
   }
diff --git a/benchmarks/src/archive/java/io/prometheus/client/benchmark/SummaryBenchmark.java b/benchmarks/src/archive/java/io/prometheus/client/benchmark/SummaryBenchmark.java
index 973106c68..2964b8ea8 100644
--- a/benchmarks/src/archive/java/io/prometheus/client/benchmark/SummaryBenchmark.java
+++ b/benchmarks/src/archive/java/io/prometheus/client/benchmark/SummaryBenchmark.java
@@ -1,6 +1,7 @@
 package io.prometheus.client.benchmark;
 
 import com.codahale.metrics.MetricRegistry;
+import java.util.concurrent.TimeUnit;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Mode;
@@ -13,8 +14,6 @@
 import org.openjdk.jmh.runner.options.Options;
 import org.openjdk.jmh.runner.options.OptionsBuilder;
 
-import java.util.concurrent.TimeUnit;
-
 @State(Scope.Benchmark)
 public class SummaryBenchmark {
 
@@ -30,27 +29,27 @@ public class SummaryBenchmark {
 
   @Setup
   public void setup() {
-    prometheusSimpleSummary = io.prometheus.client.Summary.build()
-      .name("name")
-      .help("some description..")
-      .labelNames("some", "group").create();
+    prometheusSimpleSummary =
+        io.prometheus.client.Summary.build()
+            .name("name")
+            .help("some description..")
+            .labelNames("some", "group")
+            .create();
     prometheusSimpleSummaryChild = prometheusSimpleSummary.labels("test", "group");
 
-    prometheusSimpleSummaryNoLabels = io.prometheus.client.Summary.build()
-      .name("name")
-      .help("some description..")
-      .create();
+    prometheusSimpleSummaryNoLabels =
+        io.prometheus.client.Summary.build().name("name").help("some description..").create();
 
-    prometheusSimpleHistogram = io.prometheus.client.Histogram.build()
-      .name("name")
-      .help("some description..")
-      .labelNames("some", "group").create();
+    prometheusSimpleHistogram =
+        io.prometheus.client.Histogram.build()
+            .name("name")
+            .help("some description..")
+            .labelNames("some", "group")
+            .create();
     prometheusSimpleHistogramChild = prometheusSimpleHistogram.labels("test", "group");
 
-    prometheusSimpleHistogramNoLabels = io.prometheus.client.Histogram.build()
-      .name("name")
-      .help("some description..")
-      .create();
+    prometheusSimpleHistogramNoLabels =
+        io.prometheus.client.Histogram.build().name("name").help("some description..").create();
 
     registry = new MetricRegistry();
     codahaleHistogram = registry.histogram("name");
@@ -60,28 +59,28 @@ public void setup() {
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleSummaryBenchmark() {
-    prometheusSimpleSummary.labels("test", "group").observe(1) ;
+    prometheusSimpleSummary.labels("test", "group").observe(1);
   }
 
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleSummaryChildBenchmark() {
-    prometheusSimpleSummaryChild.observe(1); 
+    prometheusSimpleSummaryChild.observe(1);
   }
 
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleSummaryNoLabelsBenchmark() {
-    prometheusSimpleSummaryNoLabels.observe(1); 
+    prometheusSimpleSummaryNoLabels.observe(1);
   }
 
   @Benchmark
   @BenchmarkMode({Mode.AverageTime})
   @OutputTimeUnit(TimeUnit.NANOSECONDS)
   public void prometheusSimpleHistogramBenchmark() {
-    prometheusSimpleHistogram.labels("test", "group").observe(1) ;
+    prometheusSimpleHistogram.labels("test", "group").observe(1);
   }
 
   @Benchmark
@@ -107,13 +106,14 @@ public void codahaleHistogramBenchmark() {
 
   public static void main(String[] args) throws RunnerException {
 
-    Options opt = new OptionsBuilder()
-      .include(SummaryBenchmark.class.getSimpleName())
-      .warmupIterations(5)
-      .measurementIterations(4)
-      .threads(4)
-      .forks(1)
-      .build();
+    Options opt =
+        new OptionsBuilder()
+            .include(SummaryBenchmark.class.getSimpleName())
+            .warmupIterations(5)
+            .measurementIterations(4)
+            .threads(4)
+            .forks(1)
+            .build();
 
     new Runner(opt).run();
   }
diff --git a/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/BenchmarkRunner.java b/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/BenchmarkRunner.java
index 6503c0d60..9d5d242ae 100644
--- a/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/BenchmarkRunner.java
+++ b/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/BenchmarkRunner.java
@@ -1,7 +1,7 @@
 package io.prometheus.metrics.benchmarks;
 
 public class BenchmarkRunner {
-    public static void main(String[] args) throws Exception {
-        org.openjdk.jmh.Main.main(args);
-    }
+  public static void main(String[] args) throws Exception {
+    org.openjdk.jmh.Main.main(args);
+  }
 }
diff --git a/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/CounterBenchmark.java b/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/CounterBenchmark.java
index 31678ad10..f5d0a1a0f 100644
--- a/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/CounterBenchmark.java
+++ b/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/CounterBenchmark.java
@@ -18,196 +18,189 @@
 import org.openjdk.jmh.annotations.Threads;
 
 /**
- * Results on a machine with dedicated 8 vCPU cores:
+ * Results on a machine with dedicated Ubuntu 24.04 LTS, AMD Ryzen™ 9 7900 × 24, 96.0 GiB RAM:
+ *
  * 
- * Benchmark                                                                  Mode  Cnt      Score     Error  Units
- * i.p.metrics.benchmarks.CounterBenchmark.codahaleIncNoLabels               thrpt   25  30978.055 ± 424.088  ops/s
- * i.p.metrics.benchmarks.CounterBenchmark.openTelemetryAdd                  thrpt   25  12682.744 ± 162.425  ops/s
- * i.p.metrics.benchmarks.CounterBenchmark.openTelemetryInc                  thrpt   25  14434.710 ±  99.809  ops/s
- * i.p.metrics.benchmarks.CounterBenchmark.openTelemetryIncNoLabels          thrpt   25  16634.416 ±  13.098  ops/s
- * i.p.metrics.benchmarks.CounterBenchmark.prometheusAdd                     thrpt   25  37317.024 ± 283.064  ops/s
- * i.p.metrics.benchmarks.CounterBenchmark.prometheusInc                     thrpt   25  39436.278 ± 458.583  ops/s
- * i.p.metrics.benchmarks.CounterBenchmark.prometheusNoLabelsInc             thrpt   25  34752.910 ± 293.979  ops/s
- * i.p.metrics.benchmarks.CounterBenchmark.simpleclientAdd                   thrpt   25   9520.592 ± 245.787  ops/s
- * i.p.metrics.benchmarks.CounterBenchmark.simpleclientInc                   thrpt   25   9057.637 ±  67.761  ops/s
- * i.p.metrics.benchmarks.CounterBenchmark.simpleclientNoLabelsInc           thrpt   25   8993.471 ±  49.581  ops/s
+ * Benchmark                                             Mode  Cnt       Score       Error  Units
+ * CounterBenchmark.codahaleIncNoLabels                 thrpt   25  144632.191 ±  2778.333  ops/s
+ * CounterBenchmark.openTelemetryAdd                    thrpt   25    2165.775 ±   168.554  ops/s
+ * CounterBenchmark.openTelemetryInc                    thrpt   25    1940.143 ±    86.223  ops/s
+ * CounterBenchmark.openTelemetryIncNoLabels            thrpt   25    1880.089 ±   192.395  ops/s
+ * CounterBenchmark.prometheusAdd                       thrpt   25  122427.789 ±  1377.485  ops/s
+ * CounterBenchmark.prometheusInc                       thrpt   25  183603.131 ±  2812.874  ops/s
+ * CounterBenchmark.prometheusNoLabelsInc               thrpt   25  169733.499 ±   670.495  ops/s
+ * CounterBenchmark.simpleclientAdd                     thrpt   25   13771.151 ±    77.473  ops/s
+ * CounterBenchmark.simpleclientInc                     thrpt   25   14255.342 ±   117.339  ops/s
+ * CounterBenchmark.simpleclientNoLabelsInc             thrpt   25   14175.465 ±    56.575  ops/s
  * 
- * Prometheus counters are faster than counters of other libraries. For example, incrementing a single counter - * without labels is more than 2 times faster (34752 ops / second) than doing the same with an OpenTelemetry - * counter (16634 ops / sec). + * + * Prometheus counters are faster than counters of other libraries. For example, incrementing a + * single counter without labels is more than 2 times faster (34752 ops / second) than doing the + * same with an OpenTelemetry counter (16634 ops / sec). */ public class CounterBenchmark { - @State(Scope.Benchmark) - public static class PrometheusCounter { - - final Counter noLabels; - final CounterDataPoint dataPoint; - - public PrometheusCounter() { - noLabels = Counter.builder() - .name("test") - .help("help") - .build(); - - Counter labels = Counter.builder() - .name("test") - .help("help") - .labelNames("path", "status") - .build(); - this.dataPoint = labels.labelValues("/", "200"); - } - } + @State(Scope.Benchmark) + public static class PrometheusCounter { - @State(Scope.Benchmark) - public static class SimpleclientCounter { + final Counter noLabels; + final CounterDataPoint dataPoint; - final io.prometheus.client.Counter noLabels; - final io.prometheus.client.Counter.Child dataPoint; + public PrometheusCounter() { + noLabels = Counter.builder().name("test").help("help").build(); - public SimpleclientCounter() { - noLabels = io.prometheus.client.Counter.build() - .name("name") - .help("help") - .create(); + Counter labels = + Counter.builder().name("test").help("help").labelNames("path", "status").build(); + this.dataPoint = labels.labelValues("/", "200"); + } + } - io.prometheus.client.Counter counter = io.prometheus.client.Counter.build() - .name("name") - .help("help") - .labelNames("path", "status") - .create(); + @State(Scope.Benchmark) + public static class SimpleclientCounter { - this.dataPoint = counter.labels("/", "200"); - } - } + final io.prometheus.client.Counter noLabels; + final 
io.prometheus.client.Counter.Child dataPoint; - @State(Scope.Benchmark) - public static class CodahaleCounterNoLabels { - final com.codahale.metrics.Counter counter = new com.codahale.metrics.MetricRegistry().counter("test"); - } + public SimpleclientCounter() { + noLabels = io.prometheus.client.Counter.build().name("name").help("help").create(); - @State(Scope.Benchmark) - public static class OpenTelemetryCounter { - - final LongCounter longCounter; - final DoubleCounter doubleCounter; - final Attributes attributes; - - public OpenTelemetryCounter() { - - SdkMeterProvider sdkMeterProvider = SdkMeterProvider.builder() - .registerMetricReader(InMemoryMetricReader.create()) - .setResource(Resource.getDefault()) - .build(); - OpenTelemetry openTelemetry = OpenTelemetrySdk.builder() - .setMeterProvider(sdkMeterProvider) - .build(); - Meter meter = openTelemetry - .meterBuilder("instrumentation-library-name") - .setInstrumentationVersion("1.0.0") - .build(); - this.longCounter = meter - .counterBuilder("test1") - .setDescription("test") - .build(); - this.doubleCounter = meter - .counterBuilder("test2") - .ofDoubles() - .setDescription("test") - .build(); - this.attributes = Attributes.of( - AttributeKey.stringKey("path"), "/", - AttributeKey.stringKey("status"), "200"); - } - } + io.prometheus.client.Counter counter = + io.prometheus.client.Counter.build() + .name("name") + .help("help") + .labelNames("path", "status") + .create(); - @Benchmark - @Threads(4) - public CounterDataPoint prometheusAdd(RandomNumbers randomNumbers, PrometheusCounter counter) { - for (int i=0; i - * Benchmark Mode Cnt Score Error Units - * i.p.metrics.benchmarks.HistogramBenchmark.openTelemetryClassic thrpt 25 1908.715 ± 114.050 ops/s - * i.p.metrics.benchmarks.HistogramBenchmark.openTelemetryExponential thrpt 25 1009.785 ± 12.965 ops/s - * i.p.metrics.benchmarks.HistogramBenchmark.prometheusClassic thrpt 25 6451.533 ± 326.265 ops/s - * 
i.p.metrics.benchmarks.HistogramBenchmark.prometheusNative thrpt 25 3372.789 ± 339.328 ops/s - * i.p.metrics.benchmarks.HistogramBenchmark.simpleclient thrpt 25 6488.252 ± 96.737 ops/s + * Benchmark Mode Cnt Score Error Units + * HistogramBenchmark.openTelemetryClassic thrpt 25 968.178 ± 28.582 ops/s + * HistogramBenchmark.openTelemetryExponential thrpt 25 836.000 ± 17.709 ops/s + * HistogramBenchmark.prometheusClassic thrpt 25 7010.393 ± 683.782 ops/s + * HistogramBenchmark.prometheusNative thrpt 25 5040.572 ± 284.433 ops/s + * HistogramBenchmark.simpleclient thrpt 25 10485.462 ± 41.265 ops/s *
+ * * The simpleclient (i.e. client_java version 0.16.0 and older) histograms perform about the same as * the classic histogram of the current 1.0.0 version. - *

- * Compared to OpenTelemetry histograms the Prometheus Java client histograms perform more than 3 times better - * (OpenTelemetry has 1908 ops / sec for classic histograms, while Prometheus has 6451 ops / sec). + * + *

Compared to OpenTelemetry histograms the Prometheus Java client histograms perform more than 3 + * times better (OpenTelemetry has 1908 ops / sec for classic histograms, while Prometheus has 6451 + * ops / sec). */ - public class HistogramBenchmark { - @State(Scope.Benchmark) - public static class PrometheusClassicHistogram { + @State(Scope.Benchmark) + public static class PrometheusClassicHistogram { - final Histogram noLabels; + final Histogram noLabels; - public PrometheusClassicHistogram() { - noLabels = Histogram.builder() - .name("test") - .help("help") - .classicOnly() - .build(); - } + public PrometheusClassicHistogram() { + noLabels = Histogram.builder().name("test").help("help").classicOnly().build(); } - - @State(Scope.Benchmark) - public static class PrometheusNativeHistogram { - - final Histogram noLabels; - - public PrometheusNativeHistogram() { - noLabels = Histogram.builder() - .name("test") - .help("help") - .nativeOnly() - .nativeInitialSchema(5) - .nativeMaxNumberOfBuckets(0) - .build(); - } + } + + @State(Scope.Benchmark) + public static class PrometheusNativeHistogram { + + final Histogram noLabels; + + public PrometheusNativeHistogram() { + noLabels = + Histogram.builder() + .name("test") + .help("help") + .nativeOnly() + .nativeInitialSchema(5) + .nativeMaxNumberOfBuckets(0) + .build(); } + } - @State(Scope.Benchmark) - public static class SimpleclientHistogram { + @State(Scope.Benchmark) + public static class SimpleclientHistogram { - final io.prometheus.client.Histogram noLabels; + final io.prometheus.client.Histogram noLabels; - public SimpleclientHistogram() { - noLabels = io.prometheus.client.Histogram.build() - .name("name") - .help("help") - .create(); - } + public SimpleclientHistogram() { + noLabels = io.prometheus.client.Histogram.build().name("name").help("help").create(); } - - @State(Scope.Benchmark) - public static class OpenTelemetryClassicHistogram { - - final io.opentelemetry.api.metrics.DoubleHistogram histogram; - - public 
OpenTelemetryClassicHistogram() { - - SdkMeterProvider sdkMeterProvider = SdkMeterProvider.builder() - .registerMetricReader(InMemoryMetricReader.create()) - .setResource(Resource.getDefault()) - .registerView(InstrumentSelector.builder() - .setName("test") - .build(), - View.builder() - .setAggregation(Aggregation.explicitBucketHistogram(Arrays.asList(.005, .01, .025, .05, .1, .25, .5, 1.0, 2.5, 5.0, 10.0))) - .build() - ) - .build(); - OpenTelemetry openTelemetry = OpenTelemetrySdk.builder() - .setMeterProvider(sdkMeterProvider) - .build(); - Meter meter = openTelemetry - .meterBuilder("instrumentation-library-name") - .setInstrumentationVersion("1.0.0") - .build(); - this.histogram = meter - .histogramBuilder("test") - .setDescription("test") - .build(); - } + } + + @State(Scope.Benchmark) + public static class OpenTelemetryClassicHistogram { + + final io.opentelemetry.api.metrics.DoubleHistogram histogram; + + public OpenTelemetryClassicHistogram() { + + SdkMeterProvider sdkMeterProvider = + SdkMeterProvider.builder() + .registerMetricReader(InMemoryMetricReader.create()) + .setResource(Resource.getDefault()) + .registerView( + InstrumentSelector.builder().setName("test").build(), + View.builder() + .setAggregation( + Aggregation.explicitBucketHistogram( + Arrays.asList( + .005, .01, .025, .05, .1, .25, .5, 1.0, 2.5, 5.0, 10.0))) + .build()) + .build(); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(sdkMeterProvider).build(); + Meter meter = + openTelemetry + .meterBuilder("instrumentation-library-name") + .setInstrumentationVersion("1.0.0") + .build(); + this.histogram = meter.histogramBuilder("test").setDescription("test").build(); } - - @State(Scope.Benchmark) - public static class OpenTelemetryExponentialHistogram { - - final io.opentelemetry.api.metrics.DoubleHistogram histogram; - - public OpenTelemetryExponentialHistogram() { - - SdkMeterProvider sdkMeterProvider = SdkMeterProvider.builder() - 
.registerMetricReader(InMemoryMetricReader.create()) - .setResource(Resource.getDefault()) - .registerView(InstrumentSelector.builder() - .setName("test") - .build(), - View.builder() - .setAggregation(Aggregation.base2ExponentialBucketHistogram(10_000, 5)) - .build() - ) - .build(); - OpenTelemetry openTelemetry = OpenTelemetrySdk.builder() - .setMeterProvider(sdkMeterProvider) - .build(); - Meter meter = openTelemetry - .meterBuilder("instrumentation-library-name") - .setInstrumentationVersion("1.0.0") - .build(); - this.histogram = meter - .histogramBuilder("test") - .setDescription("test") - .build(); - } + } + + @State(Scope.Benchmark) + public static class OpenTelemetryExponentialHistogram { + + final io.opentelemetry.api.metrics.DoubleHistogram histogram; + + public OpenTelemetryExponentialHistogram() { + + SdkMeterProvider sdkMeterProvider = + SdkMeterProvider.builder() + .registerMetricReader(InMemoryMetricReader.create()) + .setResource(Resource.getDefault()) + .registerView( + InstrumentSelector.builder().setName("test").build(), + View.builder() + .setAggregation(Aggregation.base2ExponentialBucketHistogram(10_000, 5)) + .build()) + .build(); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(sdkMeterProvider).build(); + Meter meter = + openTelemetry + .meterBuilder("instrumentation-library-name") + .setInstrumentationVersion("1.0.0") + .build(); + this.histogram = meter.histogramBuilder("test").setDescription("test").build(); } - - @Benchmark - @Threads(4) - public Histogram prometheusClassic(RandomNumbers randomNumbers, PrometheusClassicHistogram histogram) { - for (int i = 0; i < randomNumbers.randomNumbers.length; i++) { - histogram.noLabels.observe(randomNumbers.randomNumbers[i]); - } - return histogram.noLabels; + } + + @Benchmark + @Threads(4) + public Histogram prometheusClassic( + RandomNumbers randomNumbers, PrometheusClassicHistogram histogram) { + for (int i = 0; i < randomNumbers.randomNumbers.length; i++) { + 
histogram.noLabels.observe(randomNumbers.randomNumbers[i]); } - - @Benchmark - @Threads(4) - public Histogram prometheusNative(RandomNumbers randomNumbers, PrometheusNativeHistogram histogram) { - for (int i = 0; i < randomNumbers.randomNumbers.length; i++) { - histogram.noLabels.observe(randomNumbers.randomNumbers[i]); - } - return histogram.noLabels; + return histogram.noLabels; + } + + @Benchmark + @Threads(4) + public Histogram prometheusNative( + RandomNumbers randomNumbers, PrometheusNativeHistogram histogram) { + for (int i = 0; i < randomNumbers.randomNumbers.length; i++) { + histogram.noLabels.observe(randomNumbers.randomNumbers[i]); } - - @Benchmark - @Threads(4) - public io.prometheus.client.Histogram simpleclient(RandomNumbers randomNumbers, SimpleclientHistogram histogram) { - for (int i = 0; i < randomNumbers.randomNumbers.length; i++) { - histogram.noLabels.observe(randomNumbers.randomNumbers[i]); - } - return histogram.noLabels; + return histogram.noLabels; + } + + @Benchmark + @Threads(4) + public io.prometheus.client.Histogram simpleclient( + RandomNumbers randomNumbers, SimpleclientHistogram histogram) { + for (int i = 0; i < randomNumbers.randomNumbers.length; i++) { + histogram.noLabels.observe(randomNumbers.randomNumbers[i]); } - - @Benchmark - @Threads(4) - public io.opentelemetry.api.metrics.DoubleHistogram openTelemetryClassic(RandomNumbers randomNumbers, OpenTelemetryClassicHistogram histogram) { - for (int i = 0; i < randomNumbers.randomNumbers.length; i++) { - histogram.histogram.record(randomNumbers.randomNumbers[i]); - } - return histogram.histogram; + return histogram.noLabels; + } + + @Benchmark + @Threads(4) + public io.opentelemetry.api.metrics.DoubleHistogram openTelemetryClassic( + RandomNumbers randomNumbers, OpenTelemetryClassicHistogram histogram) { + for (int i = 0; i < randomNumbers.randomNumbers.length; i++) { + histogram.histogram.record(randomNumbers.randomNumbers[i]); } - - @Benchmark - @Threads(4) - public 
io.opentelemetry.api.metrics.DoubleHistogram openTelemetryExponential(RandomNumbers randomNumbers, OpenTelemetryExponentialHistogram histogram) { - for (int i = 0; i < randomNumbers.randomNumbers.length; i++) { - histogram.histogram.record(randomNumbers.randomNumbers[i]); - } - return histogram.histogram; + return histogram.histogram; + } + + @Benchmark + @Threads(4) + public io.opentelemetry.api.metrics.DoubleHistogram openTelemetryExponential( + RandomNumbers randomNumbers, OpenTelemetryExponentialHistogram histogram) { + for (int i = 0; i < randomNumbers.randomNumbers.length; i++) { + histogram.histogram.record(randomNumbers.randomNumbers[i]); } + return histogram.histogram; + } } diff --git a/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/RandomNumbers.java b/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/RandomNumbers.java index d7002d909..6778c4ea1 100644 --- a/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/RandomNumbers.java +++ b/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/RandomNumbers.java @@ -1,19 +1,18 @@ package io.prometheus.metrics.benchmarks; +import java.util.Random; import org.openjdk.jmh.annotations.Scope; import org.openjdk.jmh.annotations.State; -import java.util.Random; - @State(Scope.Thread) public class RandomNumbers { - final double[] randomNumbers = new double[10*1024]; + final double[] randomNumbers = new double[10 * 1024]; - public RandomNumbers() { - Random rand = new Random(0); - for (int i = 0; i < randomNumbers.length; i++) { - randomNumbers[i] = Math.abs(rand.nextGaussian()); - } + public RandomNumbers() { + Random rand = new Random(0); + for (int i = 0; i < randomNumbers.length; i++) { + randomNumbers[i] = Math.abs(rand.nextGaussian()); } + } } diff --git a/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/TextFormatUtilBenchmark.java b/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/TextFormatUtilBenchmark.java new file mode 100644 index 000000000..ed3ef4246 --- 
/dev/null +++ b/benchmarks/src/main/java/io/prometheus/metrics/benchmarks/TextFormatUtilBenchmark.java @@ -0,0 +1,136 @@ +package io.prometheus.metrics.benchmarks; + +import io.prometheus.metrics.config.EscapingScheme; +import io.prometheus.metrics.expositionformats.ExpositionFormatWriter; +import io.prometheus.metrics.expositionformats.OpenMetricsTextFormatWriter; +import io.prometheus.metrics.expositionformats.PrometheusTextFormatWriter; +import io.prometheus.metrics.model.snapshots.GaugeSnapshot; +import io.prometheus.metrics.model.snapshots.GaugeSnapshot.GaugeDataPointSnapshot; +import io.prometheus.metrics.model.snapshots.Labels; +import io.prometheus.metrics.model.snapshots.MetricSnapshot; +import io.prometheus.metrics.model.snapshots.MetricSnapshots; +import io.prometheus.metrics.model.snapshots.SummarySnapshot; +import io.prometheus.metrics.model.snapshots.SummarySnapshot.SummaryDataPointSnapshot; +import io.prometheus.metrics.model.snapshots.Unit; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; + +/** + * Results on a machine with dedicated Ubuntu 24.04 LTS, AMD Ryzen™ 9 7900 × 24, 96.0 GiB RAM: + * + *

+ * Benchmark                                             Mode  Cnt       Score       Error  Units
+ * TextFormatUtilBenchmark.openMetricsWriteToByteArray  thrpt   25  826847.708 ± 10941.611  ops/s
+ * TextFormatUtilBenchmark.openMetricsWriteToNull       thrpt   25  847756.101 ±  5299.128  ops/s
+ * TextFormatUtilBenchmark.prometheusWriteToByteArray   thrpt   25  874804.601 ±  9730.060  ops/s
+ * TextFormatUtilBenchmark.prometheusWriteToNull        thrpt   25  910782.719 ± 17617.167  ops/s
+ * 
+ */ +public class TextFormatUtilBenchmark { + + private static final MetricSnapshots SNAPSHOTS; + + static { + MetricSnapshot gaugeSnapshot = + GaugeSnapshot.builder() + .name("gauge_snapshot_name") + .dataPoint( + GaugeDataPointSnapshot.builder() + .labels(Labels.of("name", "value")) + .scrapeTimestampMillis(1000L) + .value(123.45d) + .build()) + .build(); + + MetricSnapshot summaryDataPointSnapshot = + SummarySnapshot.builder() + .name("summary_snapshot_name_bytes") + .dataPoint( + SummaryDataPointSnapshot.builder() + .count(5) + .labels(Labels.of("name", "value")) + .sum(123456d) + .build()) + .unit(Unit.BYTES) + .build(); + + SNAPSHOTS = MetricSnapshots.of(gaugeSnapshot, summaryDataPointSnapshot); + } + + private static final ExpositionFormatWriter OPEN_METRICS_TEXT_FORMAT_WRITER = + OpenMetricsTextFormatWriter.create(); + private static final ExpositionFormatWriter PROMETHEUS_TEXT_FORMAT_WRITER = + PrometheusTextFormatWriter.create(); + + @State(Scope.Benchmark) + public static class WriterState { + + final ByteArrayOutputStream byteArrayOutputStream; + + public WriterState() { + this.byteArrayOutputStream = new ByteArrayOutputStream(); + } + } + + @Benchmark + public OutputStream openMetricsWriteToByteArray(WriterState writerState) throws IOException { + // avoid growing the array + ByteArrayOutputStream byteArrayOutputStream = writerState.byteArrayOutputStream; + byteArrayOutputStream.reset(); + OPEN_METRICS_TEXT_FORMAT_WRITER.write( + byteArrayOutputStream, SNAPSHOTS, EscapingScheme.ALLOW_UTF8); + return byteArrayOutputStream; + } + + @Benchmark + public OutputStream openMetricsWriteToNull() throws IOException { + OutputStream nullOutputStream = NullOutputStream.INSTANCE; + OPEN_METRICS_TEXT_FORMAT_WRITER.write(nullOutputStream, SNAPSHOTS, EscapingScheme.ALLOW_UTF8); + return nullOutputStream; + } + + @Benchmark + public OutputStream prometheusWriteToByteArray(WriterState writerState) throws IOException { + // avoid growing the array + 
ByteArrayOutputStream byteArrayOutputStream = writerState.byteArrayOutputStream; + byteArrayOutputStream.reset(); + PROMETHEUS_TEXT_FORMAT_WRITER.write( + byteArrayOutputStream, SNAPSHOTS, EscapingScheme.ALLOW_UTF8); + return byteArrayOutputStream; + } + + @Benchmark + public OutputStream prometheusWriteToNull() throws IOException { + OutputStream nullOutputStream = NullOutputStream.INSTANCE; + PROMETHEUS_TEXT_FORMAT_WRITER.write(nullOutputStream, SNAPSHOTS, EscapingScheme.ALLOW_UTF8); + return nullOutputStream; + } + + static final class NullOutputStream extends OutputStream { + + static final OutputStream INSTANCE = new NullOutputStream(); + + private NullOutputStream() { + super(); + } + + @Override + public void write(int b) {} + + @Override + public void write(byte[] b) {} + + @Override + public void write(byte[] b, int off, int len) {} + + @Override + public void flush() {} + + @Override + public void close() {} + } +} diff --git a/checkstyle-suppressions.xml b/checkstyle-suppressions.xml new file mode 100644 index 000000000..82e964658 --- /dev/null +++ b/checkstyle-suppressions.xml @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/checkstyle.xml b/checkstyle.xml new file mode 100644 index 000000000..cac6be8d3 --- /dev/null +++ b/checkstyle.xml @@ -0,0 +1,374 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/README.md b/docs/README.md index 2868196d5..8ca147236 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,59 +1,60 @@ -Docs 
----- +# Docs -This directory contains [hugo](https://gohugo.io) documentation to be published in Github pages. +This directory contains [hugo](https://gohugo.io) documentation to be published in GitHub pages. -Run Locally ------------ +## Run Locally -``` +```shell hugo server -D ``` This will serve the docs on [http://localhost:1313](http://localhost:1313). -Deploy to Github Pages ----------------------- +## Deploy to GitHub Pages -Changes to the `main` branch will be deployed automatically with Github actions. +Changes to the `main` branch will be deployed automatically with GitHub Actions. -Update Javadoc --------------- +## Update Javadoc -Javadoc are not checked-in to the Github repository. -They are generated on the fly by Github actions when the docs are updated. +Javadoc are not checked-in to the GitHub repository. +They are generated on the fly by GitHub Actions when the docs are updated. To view locally, run the following: -``` -# note that the 'compile' in the following command is necessary for Javadoc to detect the module structure +```shell +# note that the 'compile' in the following command is necessary for +# Javadoc to detect the module structure ./mvnw clean compile javadoc:javadoc javadoc:aggregate rm -r ./docs/static/api mv ./target/site/apidocs ./docs/static/api ``` -Github pages are in the `/client_java/` folder, so we link to `/client_java/api` rather than `/api`. +GitHub pages are in the `/client_java/` folder, so we link to `/client_java/api` rather than `/api`. To make JavaDoc work locally, create a link: -``` +```shell mkdir ./docs/static/client_java ln -s ../api ./docs/static/client_java/api ``` -Update Geekdocs ---------------- +## Update Geekdocs -The docs use the [Geekdocs](https://geekdocs.de/) theme. The theme is checked in to Github in the `./docs/themes/hugo-geekdoc/` folder. 
To update [Geekdocs](https://geekdocs.de/), remove the current folder and create a new one with the latest [release](https://github.com/thegeeklab/hugo-geekdoc/releases). There are no local modifications in `./docs/themes/hugo-geekdoc/`. +The docs use the [Geekdocs](https://geekdocs.de/) theme. The theme is checked in to GitHub in the +`./docs/themes/hugo-geekdoc/` folder. To update [Geekdocs](https://geekdocs.de/), remove the current +folder and create a new one with the +latest [release](https://github.com/thegeeklab/hugo-geekdoc/releases). There are no local +modifications in `./docs/themes/hugo-geekdoc/`. -Notes ------ +## Notes Here's how the initial `docs/` folder was set up: -``` +```shell hugo new site docs cd docs/ mkdir -p themes/hugo-geekdoc/ -curl -L https://github.com/thegeeklab/hugo-geekdoc/releases/download/v0.41.1/hugo-geekdoc.tar.gz | tar -xz -C themes/hugo-geekdoc/ --strip-components=1 +curl -L https://github.com/thegeeklab/hugo-geekdoc/releases/download/v0.41.1/hugo-geekdoc.tar.gz \ + | tar -xz -C themes/hugo-geekdoc/ --strip-components=1 ``` -Create the initial `hugo.toml` file as described in [https://geekdocs.de/usage/getting-started/](https://geekdocs.de/usage/getting-started/). +Create the initial `hugo.toml` file as described +in [https://geekdocs.de/usage/getting-started/](https://geekdocs.de/usage/getting-started/). diff --git a/docs/content/_index.md b/docs/content/_index.md index 9de934995..28e5165cd 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -2,32 +2,54 @@ title: "client_java" --- -This is the documentation for the [Prometheus Java client library](https://github.com/prometheus/client_java) version 1.0.0 and higher. +This is the documentation for the +[Prometheus Java client library](https://github.com/prometheus/client_java) +version 1.0.0 and higher. The main new features of the 1.0.0 release are: -* **Prometheus native histograms:** Support for the new Prometheus histogram type. 
-* **OpenTelemetry Exporter:** Push metrics in OTLP format to an OpenTelemetry endpoint. -* **Runtime configuration:** Configure metrics, exporters, and more at runtime using a properties file or system properties. +- **Prometheus native histograms:** Support for the new Prometheus histogram type. +- **OpenTelemetry Exporter:** Push metrics in OTLP format to an OpenTelemetry endpoint. +- **Runtime configuration:** Configure metrics, exporters, and more at runtime using a properties + file or system properties. **Documentation and Examples** -In addition to this documentation page we created an [examples/](https://github.com/prometheus/client_java/tree/main/examples) directory with end-to-end scenarios (Docker compose) illustrating new features. +In addition to this documentation page we created an +[examples/](https://github.com/prometheus/client_java/tree/main/examples) directory with end-to-end +scenarios (Docker compose) illustrating new features. **Performance Benchmarks** -Initial performance benchmarks are looking great: All core metric types (including native histograms) allow concurrent updates, so if you instrument a performance critical Web service that utilizes all processor cores in parallel the metrics library will not introduce additional synchronization. See Javadoc comments in [benchmarks/](https://github.com/prometheus/client_java/tree/main/benchmarks) for benchmark results. +Initial performance benchmarks are looking great: All core metric types (including native +histograms) allow concurrent updates, so if you instrument a performance critical Web service +that utilizes all processor cores in parallel the metrics library will not introduce additional +synchronization. See Javadoc comments in +[benchmarks/](https://github.com/prometheus/client_java/tree/main/benchmarks) for benchmark results. 
**More Info** -The Grafana Labs Blog has a post [Introducing the Prometheus Java Client 1.0.0](https://grafana.com/blog/2023/09/27/introducing-the-prometheus-java-client-1.0.0/) with a good overview of the release. +The Grafana Labs Blog has a post +[Introducing the Prometheus Java Client 1.0.0](https://grafana.com/blog/2023/09/27/introducing-the-prometheus-java-client-1-0-0/) +with a good overview of the release. -There will also be a presentation at the [PromCon](https://promcon.io) conference on 29 Sep 2023. Tune in to the live stream on [https://promcon.io](https://promcon.io) or watch the recording on YouTube. +There will also be a presentation at the [PromCon](https://promcon.io) conference on 29 Sep 2023. +Tune in to the live stream on [https://promcon.io](https://promcon.io) +or watch the recording on YouTube. **For users of the 0.16.0 version and older** -Updating to the 1.0.0 version is a breaking change. However, there's a `prometheus-metrics-simpleclient-bridge` module available that allows you to use your existing simpleclient 0.16.0 metrics with the new 1.0.0 `PrometheusRegistry`. So you don't need to upgrade your instrumentation code, you can keep using your existing metrics. See the [compatibility > simpleclient](https://prometheus.github.io/client_java/migration/simpleclient/) in the menu on the left. +Updating to the 1.0.0 version is a breaking change. However, there's a +`prometheus-metrics-simpleclient-bridge` module available that allows you to use your existing +simpleclient 0.16.0 metrics with the new 1.0.0 `PrometheusRegistry`. +So you don't need to upgrade your instrumentation code, you can keep using your existing metrics. +See the +[compatibility > simpleclient](https://prometheus.github.io/client_java/migration/simpleclient/) +in the menu on the left. -The pre 1.0.0 code is now maintained on the [simpleclient](https://github.com/prometheus/client_java/tree/simpleclient) feature branch. 
+The pre 1.0.0 code is now maintained on the +[simpleclient](https://github.com/prometheus/client_java/tree/simpleclient) feature branch. -Not all `simpleclient` modules from 0.16.0 are included in the initial 1.0.0 release. Over the next couple of weeks we will work on porting the remaining modules, starting with `pushgateway` and the Servlet filter. +Not all `simpleclient` modules from 0.16.0 are included in the initial 1.0.0 release. +Over the next couple of weeks we will work on porting the remaining modules, +starting with `pushgateway` and the Servlet filter. diff --git a/docs/content/config/config.md b/docs/content/config/config.md index 43a396174..cd7f7af7b 100644 --- a/docs/content/config/config.md +++ b/docs/content/config/config.md @@ -7,133 +7,214 @@ weight: 1 The Prometheus metrics library provides multiple options how to override configuration at runtime: -* Properties file -* System properties - -Future releases will add more options, like configuration via environment variables. +- Properties file +- System properties +- Environment variables Example: -``` -io.prometheus.exporter.httpServer.port = 9401 +```properties +io.prometheus.exporter.http_server.port=9401 ``` -The property above changes the port for the [HTTPServer exporter]({{< relref "/exporters/httpserver.md" >}}) to _9401_. +The property above changes the port for the +[HTTPServer exporter]({{< relref "/exporters/httpserver.md" >}}) to _9401_. -* Properties file: Add the line above to the properties file. -* System properties: Use the command line parameter `-Dio.prometheus.exporter.httpServer.port=9401` when starting your application. +- **Properties file**: Add the line above to the properties file. +- **System properties**: Use the command line parameter + `-Dio.prometheus.exporter.http_server.port=9401` when starting your application. 
+- **Environment variables**: Set `IO_PROMETHEUS_EXPORTER_HTTP_SERVER_PORT=9401` -Location of the Properties File -------------------------------- +## Location of the Properties File The properties file is searched in the following locations: -* `/prometheus.properties` in the classpath. This is for bundling a properties file with your application. -* System property `-Dprometheus.config=/path/to/prometheus.properties`. -* Environment variable `PROMETHEUS_CONFIG=/path/to/prometheus.properties`. - -Metrics Properties ------------------- - -| Name | Javadoc | Note | -| --------------- | --------|------| -| io.prometheus.metrics.exemplarsEnabled | [Counter.Builder.withExemplars()](/client_java/api/io/prometheus/metrics/core/metrics/Counter.Builder.html#withExemplars()) | (1) (2) | -| io.prometheus.metrics.histogramNativeOnly | [Histogram.Builder.nativeOnly()](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html#nativeOnly()) | (2) | -| io.prometheus.metrics.histogramClassicOnly | [Histogram.Builder.classicOnly()](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html#classicOnly()) | (2) | -| io.prometheus.metrics.histogramClassicUpperBounds | [Histogram.Builder.classicUpperBounds()](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html#classicUpperBounds(double...)) | (3) | -| io.prometheus.metrics.histogramNativeInitialSchema | [Histogram.Builder.nativeInitialSchema()](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html#nativeInitialSchema(int)) | | -| io.prometheus.metrics.histogramNativeMinZeroThreshold | [Histogram.Builder.nativeMinZeroThreshold()](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html#nativeMinZeroThreshold(double)) | | -| io.prometheus.metrics.histogramNativeMaxZeroThreshold | [Histogram.Builder.nativeMaxZeroThreshold()](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html#nativeMaxZeroThreshold(double)) | | -| 
io.prometheus.metrics.histogramNativeMaxNumberOfBuckets | [Histogram.Builder.nativeMaxNumberOfBuckets()](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html#nativeMaxNumberOfBuckets(int)) | | -| io.prometheus.metrics.histogramNativeResetDurationSeconds | [Histogram.Builder.nativeResetDuration()](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html#nativeResetDuration(long,java.util.concurrent.TimeUnit)) | | -| io.prometheus.metrics.summaryQuantiles | [Summary.Builder.quantile(double)](/client_java/api/io/prometheus/metrics/core/metrics/Summary.Builder.html#quantile(double)) | (4) | -| io.prometheus.metrics.summaryQuantileErrors | [Summary.Builder.quantile(double, double)](/client_java/api/io/prometheus/metrics/core/metrics/Summary.Builder.html#quantile(double,double)) | (5) | -| io.prometheus.metrics.summaryMaxAgeSeconds | [Summary.Builder.maxAgeSeconds()](/client_java/api/io/prometheus/metrics/core/metrics/Summary.Builder.html#maxAgeSeconds(long)) | | -| io.prometheus.metrics.summaryNumberOfAgeBuckets | [Summary.Builder.numberOfAgeBuckets()](/client_java/api/io/prometheus/metrics/core/metrics/Summary.Builder.html#numberOfAgeBuckets(int)) | | +- `/prometheus.properties` in the classpath. This is for bundling a properties file + with your application. +- System property `-Dprometheus.config=/path/to/prometheus.properties`. +- Environment variable `PROMETHEUS_CONFIG=/path/to/prometheus.properties`. + +## Property Naming Conventions + +Properties use **snake_case** format with underscores separating words +(e.g., `http_server`, `exemplars_enabled`). + +For backward compatibility, camelCase property names are also supported in +properties files and system properties, but snake_case is the preferred format. 
+ +### Environment Variables + +Environment variables follow standard conventions: + +- All uppercase letters: `IO_PROMETHEUS_EXPORTER_HTTP_SERVER_PORT` +- Underscores for all separators (both package and word boundaries) +- Prefix must be `IO_PROMETHEUS` + +The library automatically converts environment variables to the correct property format. + +**Examples:** + +| Environment Variable | Property Equivalent | +| --------------------------------------------- | --------------------------------------------- | +| `IO_PROMETHEUS_METRICS_EXEMPLARS_ENABLED` | `io.prometheus.metrics.exemplars_enabled` | +| `IO_PROMETHEUS_EXPORTER_HTTP_SERVER_PORT` | `io.prometheus.exporter.http_server.port` | +| `IO_PROMETHEUS_METRICS_HISTOGRAM_NATIVE_ONLY` | `io.prometheus.metrics.histogram_native_only` | + +### Property Precedence + +When the same property is defined in multiple sources, the following precedence order applies +(highest to lowest): + +1. **External properties** (passed explicitly via API) +2. **Environment variables** +3. **System properties** (command line `-D` flags) +4. 
**Properties file** (from file or classpath) + +## Metrics Properties + + + +| Name | Javadoc | Note | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| io.prometheus.metrics.exemplars_enabled | [Counter.Builder.withExemplars()]() | (1) (2) | +| io.prometheus.metrics.histogram_native_only | [Histogram.Builder.nativeOnly()]() | (2) | +| io.prometheus.metrics.histogram_classic_only | [Histogram.Builder.classicOnly()]() | (2) | +| io.prometheus.metrics.histogram_classic_upper_bounds | [Histogram.Builder.classicUpperBounds()]() | (3) | +| io.prometheus.metrics.histogram_native_initial_schema | [Histogram.Builder.nativeInitialSchema()]() | | +| io.prometheus.metrics.histogram_native_min_zero_threshold | [Histogram.Builder.nativeMinZeroThreshold()]() | | +| io.prometheus.metrics.histogram_native_max_zero_threshold | [Histogram.Builder.nativeMaxZeroThreshold()]() | | +| io.prometheus.metrics.histogram_native_max_number_of_buckets | [Histogram.Builder.nativeMaxNumberOfBuckets()]() | | +| io.prometheus.metrics.histogram_native_reset_duration_seconds | [Histogram.Builder.nativeResetDuration()]() | | +| io.prometheus.metrics.summary_quantiles | [Summary.Builder.quantile(double)]() | (4) | +| io.prometheus.metrics.summary_quantile_errors | [Summary.Builder.quantile(double, double)]() | (5) | +| io.prometheus.metrics.summary_max_age_seconds | [Summary.Builder.maxAgeSeconds()]() | | +| io.prometheus.metrics.summary_number_of_age_buckets | [Summary.Builder.numberOfAgeBuckets()]() | | + + **Notes** -(1) _withExemplars()_ and _withoutExemplars()_ are available for all metric types, not just for counters
+(1) _withExemplars()_ and _withoutExemplars()_ are available for all metric types, +not just for counters
(2) Boolean value. Format: `property=true` or `property=false`.
(3) Comma-separated list. Example: `.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10`.
(4) Comma-separated list. Example: `0.5, 0.95, 0.99`.
-(5) Comma-separated list. If specified, the list must have the same length as `io.prometheus.metrics.summaryQuantiles`. Example: `0.01, 0.005, 0.005`. +(5) Comma-separated list. If specified, the list must have the same length as +`io.prometheus.metrics.summary_quantiles`. Example: `0.01, 0.005, 0.005`. -There's one special feature about metric properties: You can set a property for one specific metric only by specifying the metric name. Example: Let's say you have a histogram named `latency_seconds`. +There's one special feature about metric properties: You can set a property for one specific +metric only by specifying the metric name. Example: +Let's say you have a histogram named `latency_seconds`. -``` -io.prometheus.metrics.histogramClassicUpperBounds = 0.2, 0.4, 0.8, 1.0 +```properties +io.prometheus.metrics.histogram_classic_upper_bounds=0.2, 0.4, 0.8, 1.0 ``` The line above sets histogram buckets for all histograms. However: -``` -io.prometheus.metrics.latency_seconds.histogramClassicUpperBounds = 0.2, 0.4, 0.8, 1.0 +```properties +io.prometheus.metrics.latency_seconds.histogram_classic_upper_bounds=0.2, 0.4, 0.8, 1.0 ``` The line above sets histogram buckets only for the histogram named `latency_seconds`. This works for all Metrics properties. 
-Exemplar Properties -------------------- +## Exemplar Properties + + -| Name | Javadoc | Note | -| --------------- | --------|------| -| io.prometheus.exemplars.minRetentionPeriodSeconds | [ExemplarsProperties.getMinRetentionPeriodSeconds()](/client_java/api/io/prometheus/metrics/config/ExemplarsProperties.html#getMinRetentionPeriodSeconds()) | | -| io.prometheus.exemplars.maxRetentionPeriodSeconds | [ExemplarsProperties.getMaxRetentionPeriodSeconds()](/client_java/api/io/prometheus/metrics/config/ExemplarsProperties.html#getMaxRetentionPeriodSeconds()) | | -| io.prometheus.exemplars.sampleIntervalMilliseconds | [ExemplarsProperties.getSampleIntervalMilliseconds()](/client_java/api/io/prometheus/metrics/config/ExemplarsProperties.html#getSampleIntervalMilliseconds()) | | +| Name | Javadoc | Note | +| ---------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---- | +| io.prometheus.exemplars.min_retention_period_seconds | [ExemplarsProperties.getMinRetentionPeriodSeconds()]() | | +| io.prometheus.exemplars.max_retention_period_seconds | [ExemplarsProperties.getMaxRetentionPeriodSeconds()]() | | +| io.prometheus.exemplars.sample_interval_milliseconds | [ExemplarsProperties.getSampleIntervalMilliseconds()]() | | -Exporter Properties -------------------- + -| Name | Javadoc | Note | -| --------------- | --------|------| -| io.prometheus.exporter.includeCreatedTimestamps | [ExporterProperties.getExemplarsOnAllMetricTypes()](/client_java/api/io/prometheus/metrics/config/ExporterProperties.html#getExemplarsOnAllMetricTypes()) | (1) | -| io.prometheus.exporter.exemplarsOnAllMetricTypes | [ExporterProperties.getIncludeCreatedTimestamps()](/client_java/api/io/prometheus/metrics/config/ExporterProperties.html#getIncludeCreatedTimestamps()) | (1) | +## Exporter Properties + + + +| Name | Javadoc | Note | +| 
---------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ---- | +| io.prometheus.exporter.include_created_timestamps | [ExporterProperties.getIncludeCreatedTimestamps()]() | (1) | +| io.prometheus.exporter.exemplars_on_all_metric_types | [ExporterProperties.getExemplarsOnAllMetricTypes()]() | (1) | + + (1) Boolean value, `true` or `false`. Default see Javadoc. -Exporter Filter Properties --------------------------- - -| Name | Javadoc | Note | -| --------------- | --------|------| -| io.prometheus.exporter.filter.metricNameMustBeEqualTo | [ExporterFilterProperties.getAllowedMetricNames()](/client_java/api/io/prometheus/metrics/config/ExporterFilterProperties.html#getAllowedMetricNames()) | (1) | -| io.prometheus.exporter.filter.metricNameMustNotBeEqualTo | [ExporterFilterProperties.getExcludedMetricNames()](/client_java/api/io/prometheus/metrics/config/ExporterFilterProperties.html#getExcludedMetricNames()) | (2) | -| io.prometheus.exporter.filter.metricNameMustStartWith | [ExporterFilterProperties.getAllowedMetricNamePrefixes()](/client_java/api/io/prometheus/metrics/config/ExporterFilterProperties.html#getAllowedMetricNamePrefixes()) | (3) | -| io.prometheus.exporter.filter.metricNameMustNotStartWith | [ExporterFilterProperties.getExcludedMetricNamePrefixes()](/client_java/api/io/prometheus/metrics/config/ExporterFilterProperties.html#getExcludedMetricNamePrefixes()) | (4) | - -(1) Comma sparated list of allowed metric names. Only these metrics will be exposed.
-(2) Comma sparated list of excluded metric names. These metrics will not be exposed.
-(3) Comma sparated list of prefixes. Only metrics starting with these prefixes will be exposed.
-(4) Comma sparated list of prefixes. Metrics starting with these prefixes will not be exposed.
- -Exporter HTTPServer Properties ------------------------------- - -| Name | Javadoc | Note | -| --------------- | --------|------| -| io.prometheus.exporter.httpServer.port | [HTTPServer.Builder.port()](/client_java/api/io/prometheus/metrics/exporter/httpserver/HTTPServer.Builder.html#port(int)) | | - -Exporter OpenTelemetry Properties ---------------------------------- - -| Name | Javadoc | Note | -| --------------- | --------|------| -| io.prometheus.exporter.opentelemetry.protocol | [OpenTelemetryExporter.Builder.protocol()](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html#protocol(java.lang.String)) | (1) | -| io.prometheus.exporter.opentelemetry.endpoint | [OpenTelemetryExporter.Builder.endpoint()](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html#endpoint(java.lang.String)) | | -| io.prometheus.exporter.opentelemetry.headers | [OpenTelemetryExporter.Builder.headers()](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html#header(java.lang.String,java.lang.String)) | (2) | -| io.prometheus.exporter.opentelemetry.intervalSeconds | [OpenTelemetryExporter.Builder.intervalSeconds()](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html#intervalSeconds(int)) | | -| io.prometheus.exporter.opentelemetry.timeoutSeconds | [OpenTelemetryExporter.Builder.timeoutSeconds()](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html#timeoutSeconds(int)) | | -| io.prometheus.exporter.opentelemetry.serviceName | [OpenTelemetryExporter.Builder.serviceName()](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html#serviceName(java.lang.String)) | | -| io.prometheus.exporter.opentelemetry.serviceNamespace | 
[OpenTelemetryExporter.Builder.serviceNamespace()](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html#serviceNamespace(java.lang.String)) | | -| io.prometheus.exporter.opentelemetry.serviceInstanceId | [OpenTelemetryExporter.Builder.serviceInstanceId()](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html#serviceInstanceId(java.lang.String)) | | -| io.prometheus.exporter.opentelemetry.serviceVersion | [OpenTelemetryExporter.Builder.serviceVersion()](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html#serviceVersion(java.lang.String)) | | -| io.prometheus.exporter.opentelemetry.resourceAttributes | [OpenTelemetryExporter.Builder.resourceAttributes()](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html#resourceAttribute(java.lang.String,java.lang.String)) | (3) | +## Exporter Filter Properties + + + +| Name | Javadoc | Note | +| -------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---- | +| io.prometheus.exporter.filter.metric_name_must_be_equal_to | [ExporterFilterProperties.getAllowedMetricNames()]() | (1) | +| io.prometheus.exporter.filter.metric_name_must_not_be_equal_to | [ExporterFilterProperties.getExcludedMetricNames()]() | (2) | +| io.prometheus.exporter.filter.metric_name_must_start_with | [ExporterFilterProperties.getAllowedMetricNamePrefixes()]() | (3) | +| io.prometheus.exporter.filter.metric_name_must_not_start_with | [ExporterFilterProperties.getExcludedMetricNamePrefixes()]() | (4) | + + + +(1) Comma separated list of allowed metric names. Only these metrics will be exposed.
+(2) Comma-separated list of excluded metric names. These metrics will not be exposed.\
+(3) Comma-separated list of prefixes. +Only metrics starting with these prefixes will be exposed.\
+(4) Comma-separated list of prefixes. Metrics starting with these prefixes will not be exposed.\
+ +## Exporter HTTPServer Properties + + + +| Name | Javadoc | Note | +| --------------------------------------- | --------------------------------------------------------------------------------------------------------------------------- | ---- | +| io.prometheus.exporter.http_server.port | [HTTPServer.Builder.port()]() | | + + + +## Exporter OpenTelemetry Properties + + + +| Name | Javadoc | Note | +| -------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---- | +| io.prometheus.exporter.opentelemetry.protocol | [OpenTelemetryExporter.Builder.protocol()]() | (1) | +| io.prometheus.exporter.opentelemetry.endpoint | [OpenTelemetryExporter.Builder.endpoint()]() | | +| io.prometheus.exporter.opentelemetry.headers | [OpenTelemetryExporter.Builder.headers()]() | (2) | +| io.prometheus.exporter.opentelemetry.interval_seconds | [OpenTelemetryExporter.Builder.intervalSeconds()]() | | +| io.prometheus.exporter.opentelemetry.timeout_seconds | [OpenTelemetryExporter.Builder.timeoutSeconds()]() | | +| io.prometheus.exporter.opentelemetry.service_name | [OpenTelemetryExporter.Builder.serviceName()]() | | +| io.prometheus.exporter.opentelemetry.service_namespace | [OpenTelemetryExporter.Builder.serviceNamespace()]() | | +| io.prometheus.exporter.opentelemetry.service_instance_id | [OpenTelemetryExporter.Builder.serviceInstanceId()]() | | +| io.prometheus.exporter.opentelemetry.service_version | [OpenTelemetryExporter.Builder.serviceVersion()]() | | +| io.prometheus.exporter.opentelemetry.resource_attributes | [OpenTelemetryExporter.Builder.resourceAttributes()]() | (3) | + + (1) Protocol can be `grpc` or `http/protobuf`.
(2) Format: `key1=value1,key2=value2`
(3) Format: `key1=value1,key2=value2` -Many of these attributes can alternatively be configured via OpenTelemetry environment variables, like `OTEL_EXPORTER_OTLP_ENDPOINT`. The Prometheus metrics library has support for OpenTelemetry environment variables. See Javadoc for details. +Many of these attributes can alternatively be configured via OpenTelemetry environment variables, +like `OTEL_EXPORTER_OTLP_ENDPOINT`. +The Prometheus metrics library has support for OpenTelemetry environment variables. +See Javadoc for details. + +## Exporter PushGateway Properties + + + +| Name | Javadoc | Note | +| -------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---- | +| io.prometheus.exporter.pushgateway.address | [PushGateway.Builder.address()]() | | +| io.prometheus.exporter.pushgateway.scheme | [PushGateway.Builder.scheme()]() | | +| io.prometheus.exporter.pushgateway.job | [PushGateway.Builder.job()]() | | +| io.prometheus.exporter.pushgateway.escaping_scheme | [PushGateway.Builder.escapingScheme()]() | (1) | + + + +(1) Escaping scheme can be `allow-utf-8`, `underscores`, `dots`, or `values` as described in +[escaping schemes](https://github.com/prometheus/docs/blob/main/docs/instrumenting/escaping_schemes.md#escaping-schemes) +and in the [Unicode documentation]({{< relref "../exporters/unicode.md" >}}). diff --git a/docs/content/exporters/filter.md b/docs/content/exporters/filter.md index 32dc0a56d..ae7ad9564 100644 --- a/docs/content/exporters/filter.md +++ b/docs/content/exporters/filter.md @@ -1,18 +1,20 @@ --- title: Filter -weight: 2 +weight: 3 --- All exporters support a `name[]` URL parameter for querying only specific metric names. Examples: -* `/metrics?name[]=jvm_threads_current` will query the metric named `jvm_threads_current`. 
-* `/metrics?name[]=jvm_threads_current&name[]=jvm_threads_daemon` will query two metrics, `jvm_threads_current` and `jvm_threads_daemon`. +- `/metrics?name[]=jvm_threads_current` will query the metric named `jvm_threads_current`. +- `/metrics?name[]=jvm_threads_current&name[]=jvm_threads_daemon` will query two metrics, + `jvm_threads_current` and `jvm_threads_daemon`. -Add the following to the scape job configuration in `prometheus.yml` to make the Prometheus server send the `name[]` parameter: +Add the following to the scape job configuration in `prometheus.yml` +to make the Prometheus server send the `name[]` parameter: ```yaml params: - name[]: - - jvm_threads_current - - jvm_threads_daemon + name[]: + - jvm_threads_current + - jvm_threads_daemon ``` diff --git a/docs/content/exporters/formats.md b/docs/content/exporters/formats.md index fc78ec963..a6485c84c 100644 --- a/docs/content/exporters/formats.md +++ b/docs/content/exporters/formats.md @@ -5,22 +5,106 @@ weight: 1 All exporters the following exposition formats: -* OpenMetrics text format -* Prometheus text format -* Prometheus protobuf format +- OpenMetrics text format +- Prometheus text format +- Prometheus protobuf format Moreover, gzip encoding is supported for each of these formats. -Scraping with a Prometheus server ---------------------------------- +## Scraping with a Prometheus server -The Prometheus server sends an `Accept` header to specify which format is requested. By default, the Prometheus server will scrape OpenMetrics text format with gzip encoding. If the Prometheus server is started with `--enable-feature=native-histograms`, it will scrape Prometheus protobuf format instead. +The Prometheus server sends an `Accept` header to specify which format is requested. By default, the +Prometheus server will scrape OpenMetrics text format with gzip encoding. If the Prometheus server +is started with `--enable-feature=native-histograms`, it will scrape Prometheus protobuf format +instead. 
-Viewing with a Web Browser --------------------------- +## Viewing with a Web Browser -If you view the `/metrics` endpoint with your Web browser you will see Prometheus text format. For quick debugging of the other formats, exporters provide a `debug` URL parameter: +If you view the `/metrics` endpoint with your Web browser you will see Prometheus text format. For +quick debugging of the other formats, exporters provide a `debug` URL parameter: -* `/metrics?debug=openmetrics`: View OpenMetrics text format. -* `/metrics?debug=text`: View Prometheus text format. -* `/metrics?debug=prometheus-protobuf`: View a text representation of the Prometheus protobuf format. +- `/metrics?debug=openmetrics`: View OpenMetrics text format. +- `/metrics?debug=text`: View Prometheus text format. +- `/metrics?debug=prometheus-protobuf`: View a text representation of the Prometheus protobuf + format. + +## Exclude protobuf exposition format + +You can exclude the protobuf exposition format by including the +`prometheus-metrics-exposition-textformats` module and excluding the +`prometheus-metrics-exposition-formats` module in your build file. + +For example, in Maven: + +```xml + + + io.prometheus + prometheus-metrics-exporter-httpserver + + + io.prometheus + prometheus-metrics-exposition-formats + + + + + io.prometheus + prometheus-metrics-exposition-textformats + + +``` + +## Exclude the shaded protobuf classes + +You can exclude the shaded protobuf classes including the +`prometheus-metrics-exposition-formats-no-protobuf` module and excluding the +`prometheus-metrics-exposition-formats` module in your build file. 
+ +For example, in Maven: + +```xml + + + io.prometheus + prometheus-metrics-exporter-httpserver + + + io.prometheus + prometheus-metrics-exposition-formats + + + + + io.prometheus + prometheus-metrics-exposition-formats-no-protobuf + + +``` + +## Exclude the shaded otel classes + +You can exclude the shaded otel classes including the +`prometheus-metrics-exporter-opentelemetry-no-otel` module and excluding the +`prometheus-metrics-exporter-opentelemetry` module in your build file. + +For example, in Maven: + +```xml + + + io.prometheus + prometheus-metrics-exporter-opentelemetry + + + io.prometheus + prometheus-metrics-exporter-opentelemetry + + + + + io.prometheus + prometheus-metrics-exporter-opentelemetry-no-otel + + +``` diff --git a/docs/content/exporters/httpserver.md b/docs/content/exporters/httpserver.md index e524b4cd1..a9017b0de 100644 --- a/docs/content/exporters/httpserver.md +++ b/docs/content/exporters/httpserver.md @@ -1,37 +1,43 @@ --- title: HTTPServer -weight: 3 +weight: 4 --- -The `HTTPServer` is a standalone server for exposing a metric endpoint. A minimal example application for `HTTPServer` can be found in the [examples](https://github.com/prometheus/client_java/tree/1.0.x/examples) directory. +The `HTTPServer` is a standalone server for exposing a metric endpoint. A minimal example +application for `HTTPServer` can be found in +the [examples](https://github.com/prometheus/client_java/tree/1.0.x/examples) directory. ```java HTTPServer server = HTTPServer.builder() - .port(9400) - .buildAndStart(); + .port(9400) + .buildAndStart(); ``` -By default, `HTTPServer` binds to any IP address, you can change this with [hostname()](/client_java/api/io/prometheus/metrics/exporter/httpserver/HTTPServer.Builder.html#hostname(java.lang.String)) or [inetAddress()](/client_java/api/io/prometheus/metrics/exporter/httpserver/HTTPServer.Builder.html#inetAddress(java.net.InetAddress)). 
+By default, `HTTPServer` binds to any IP address, you can change this with +[hostname()]() +or [inetAddress()](). `HTTPServer` is configured with three endpoints: -* `/metrics` for Prometheus scraping. -* `/-/healthy` for simple health checks. -* `/` the default handler is a static HTML page. +- `/metrics` for Prometheus scraping. +- `/-/healthy` for simple health checks. +- `/` the default handler is a static HTML page. -The default handler can be changed with [defaultHandler()](/client_java/api/io/prometheus/metrics/exporter/httpserver/HTTPServer.Builder.html#defaultHandler(com.sun.net.httpserver.HttpHandler)). +The default handler can be changed +with [defaultHandler()](). -Authentication and HTTPS ------------------------- +## Authentication and HTTPS -* [authenticator()](/client_java/api/io/prometheus/metrics/exporter/httpserver/HTTPServer.Builder.html#authenticator(com.sun.net.httpserver.Authenticator)) is for configuring authentication. -* [httpsConfigurator()](/client_java/api/io/prometheus/metrics/exporter/httpserver/HTTPServer.Builder.html#httpsConfigurator(com.sun.net.httpserver.HttpsConfigurator)) is for configuring HTTPS. +- [authenticator()]() + is for configuring authentication. +- [httpsConfigurator()]() + is for configuring HTTPS. -You can find an example of authentication and SSL in the [jmx_exporter](https://github.com/prometheus/jmx_exporter). +You can find an example of authentication and SSL in the +[jmx_exporter](https://github.com/prometheus/jmx_exporter). -Properties ----------- +## Properties See _config_ section (_todo_) on runtime configuration options. -* `io.prometheus.exporter.httpServer.port`: The port to bind to. +- `io.prometheus.exporter.http_server.port`: The port to bind to. 
diff --git a/docs/content/exporters/pushgateway.md b/docs/content/exporters/pushgateway.md new file mode 100644 index 000000000..497aa9b57 --- /dev/null +++ b/docs/content/exporters/pushgateway.md @@ -0,0 +1,125 @@ +--- +title: Pushgateway +weight: 6 +--- + +The [Prometheus Pushgateway](https://github.com/prometheus/pushgateway) exists to allow ephemeral +and batch jobs to expose their metrics to Prometheus. +Since these kinds of jobs may not exist long enough to be scraped, they can instead push their +metrics to a Pushgateway. +The Pushgateway then exposes these metrics to Prometheus. + +The [PushGateway](/client_java/api/io/prometheus/metrics/exporter/pushgateway/PushGateway.html) Java +class allows you to push metrics to a Prometheus Pushgateway. + +## Example + +{{< tabs "uniqueid" >}} +{{< tab "Gradle" >}} + +```groovy +implementation 'io.prometheus:prometheus-metrics-core:1.3.0' +implementation 'io.prometheus:prometheus-metrics-exporter-pushgateway:1.3.0' +``` + +{{< /tab >}} +{{< tab "Maven" >}} + +```xml + + io.prometheus + prometheus-metrics-core + 1.3.0 + + + io.prometheus + prometheus-metrics-exporter-pushgateway + 1.3.0 + +``` + +{{< /tab >}} +{{< /tabs >}} + +```java +public class ExampleBatchJob { + + private static PushGateway pushGateway = PushGateway.builder() + .address("localhost:9091") // not needed as localhost:9091 is the default + .job("example") + .build(); + + private static Gauge dataProcessedInBytes = Gauge.builder() + .name("data_processed") + .help("data processed in the last batch job run") + .unit(Unit.BYTES) + .register(); + + public static void main(String[] args) throws Exception { + try { + long bytesProcessed = processData(); + dataProcessedInBytes.set(bytesProcessed); + } finally { + pushGateway.push(); + } + } + + public static long processData() { + // Imagine a batch job here that processes data + // and returns the number of Bytes processed. 
+ return 42; + } +} +``` + +## Basic Auth + +The [PushGateway](/client_java/api/io/prometheus/metrics/exporter/pushgateway/PushGateway.html) +supports basic authentication. + +```java +PushGateway pushGateway = PushGateway.builder() + .job("example") + .basicAuth("my_user", "my_password") + .build(); +``` + +The `PushGatewayTestApp` in `integration-tests/it-pushgateway` has a complete example of this. + +## Bearer token + +The [PushGateway](/client_java/api/io/prometheus/metrics/exporter/pushgateway/PushGateway.html) +supports Bearer token authentication. + +```java +PushGateway pushGateway = PushGateway.builder() + .job("example") + .bearerToken("my_token") + .build(); +``` + +The `PushGatewayTestApp` in `integration-tests/it-pushgateway` has a complete example of this. + +## SSL + +The [PushGateway](/client_java/api/io/prometheus/metrics/exporter/pushgateway/PushGateway.html) +supports SSL. + +```java +PushGateway pushGateway = PushGateway.builder() + .job("example") + .scheme(Scheme.HTTPS) + .build(); +``` + +However, this requires that the JVM can validate the server certificate. + +If you want to skip certificate verification, you need to provide your own +[HttpConnectionFactory](/client_java/api/io/prometheus/metrics/exporter/pushgateway/HttpConnectionFactory.html). +The `PushGatewayTestApp` in `integration-tests/it-pushgateway` has a complete example of this. + +## Configuration Properties + +The [PushGateway](/client_java/api/io/prometheus/metrics/exporter/pushgateway/PushGateway.html) +supports a couple of properties that can be configured at runtime. +See [config]({{< relref "../config/config.md" >}}). 
diff --git a/docs/content/exporters/servlet.md b/docs/content/exporters/servlet.md index 36a5161cf..2b0873b70 100644 --- a/docs/content/exporters/servlet.md +++ b/docs/content/exporters/servlet.md @@ -1,15 +1,18 @@ --- title: Servlet -weight: 4 +weight: 5 --- -The [PrometheusMetricsServlet](/client_java/api/io/prometheus/metrics/exporter/servlet/jakarta/PrometheusMetricsServlet.html) is a [Jakarta Servlet](https://jakarta.ee/specifications/servlet/) for exposing a metric endpoint. +The +[PrometheusMetricsServlet](/client_java/api/io/prometheus/metrics/exporter/servlet/jakarta/PrometheusMetricsServlet.html) +is a [Jakarta Servlet](https://jakarta.ee/specifications/servlet/) for exposing a metric endpoint. -web.xml -------- +## web.xml The old-school way of configuring a servlet is in a `web.xml` file: + + ```xml ``` -Programmatic ------------- - -Today, most Servlet applications use an embedded Servlet container and configure Servlets programmatically rather than via `web.xml`. -The API for that depends on the Servlet container. -The [examples](https://github.com/prometheus/client_java/tree/1.0.x/examples) directory has an example of an embedded [Tomcat](https://tomcat.apache.org/) container with the [PrometheusMetricsServlet](/client_java/api/io/prometheus/metrics/exporter/servlet/jakarta/PrometheusMetricsServlet.html) configured. + -Spring ------- +## Programmatic -You can use the [PrometheusMetricsServlet](/client_java/api/io/prometheus/metrics/exporter/servlet/jakarta/PrometheusMetricsServlet.html) in Spring applications. See [our Spring doc]({{< relref "spring.md" >}}). +Today, most Servlet applications use an embedded Servlet container and configure Servlets +programmatically rather than via `web.xml`. +The API for that depends on the Servlet container. 
+The [examples](https://github.com/prometheus/client_java/tree/1.0.x/examples) directory has an +example of an embedded +[Tomcat](https://tomcat.apache.org/) container with the +[PrometheusMetricsServlet](/client_java/api/io/prometheus/metrics/exporter/servlet/jakarta/PrometheusMetricsServlet.html) +configured. + +## Spring + +You can use +the [PrometheusMetricsServlet](/client_java/api/io/prometheus/metrics/exporter/servlet/jakarta/PrometheusMetricsServlet.html) +in Spring applications. +See [our Spring doc]({{< relref "spring.md" >}}). diff --git a/docs/content/exporters/spring.md b/docs/content/exporters/spring.md index 73e4b2549..45df21431 100644 --- a/docs/content/exporters/spring.md +++ b/docs/content/exporters/spring.md @@ -1,35 +1,42 @@ --- title: Spring -weight: 5 +weight: 7 --- -Alternative: Use Spring's Built-in Metrics Library --------------------------------------------------- +## Alternative: Use Spring's Built-in Metrics Library -[Spring Boot](https://spring.io/projects/spring-boot) has a built-in metric library named [Micrometer](https://micrometer.io/), which supports Prometheus exposition format and can be set up in three simple steps: +[Spring Boot](https://spring.io/projects/spring-boot) has a built-in metric library named +[Micrometer](https://micrometer.io/), which supports Prometheus +exposition format and can be set up in three simple steps: 1. Add the `org.springframework.boot:spring-boot-starter-actuator` dependency. 2. Add the `io.micrometer:micrometer-registry-prometheus` as a _runtime_ dependency. -3. Enable the Prometheus endpoint by adding the line `management.endpoints.web.exposure.include=prometheus` to `application.properties`. +3. Enable the Prometheus endpoint by adding the line + `management.endpoints.web.exposure.include=prometheus` to `application.properties`. Note that Spring's default Prometheus endpoint is `/actuator/prometheus`, not `/metrics`. 
-In most cases the built-in Spring metrics library will work for you and you don't need the Prometheus Java library in Spring applications. +In most cases the built-in Spring metrics library will work for you and you don't need the +Prometheus Java library in Spring applications. -Use the Prometheus Metrics Library in Spring --------------------------------------------- +## Use the Prometheus Metrics Library in Spring -However, you may have your reasons why you want to use the Prometheus metrics library in Spring anyway. Maybe you want full support for all Prometheus metric types, or you want to use the new Prometheus native histograms. +However, you may have your reasons why you want to use the Prometheus metrics library in +Spring anyway. Maybe you want full support for all Prometheus metric types, +or you want to use the new Prometheus native histograms. -The easiest way to use the Prometheus metrics library in Spring is to configure the [PrometheusMetricsServlet](/client_java/api/io/prometheus/metrics/exporter/servlet/jakarta/PrometheusMetricsServlet.html) to expose metrics. +The easiest way to use the Prometheus metrics library in Spring is to configure the +[PrometheusMetricsServlet](/client_java/api/io/prometheus/metrics/exporter/servlet/jakarta/PrometheusMetricsServlet.html) +to expose metrics. Dependencies: -* `prometheus-metrics-core`: The core metrics library. -* `prometheus-metrics-exporter-servlet-jakarta`: For providing the `/metrics` endpoint. -* `prometheus-metrics-instrumentation-jvm`: Optional - JVM metrics +- `prometheus-metrics-core`: The core metrics library. +- `prometheus-metrics-exporter-servlet-jakarta`: For providing the `/metrics` endpoint. 
+- `prometheus-metrics-instrumentation-jvm`: Optional - JVM metrics -The following is the complete source code of a Spring Boot REST service using the Prometheus metrics library: +The following is the complete source code of a Spring Boot REST service using +the Prometheus metrics library: ```java import io.prometheus.metrics.core.metrics.Counter; @@ -46,35 +53,40 @@ import org.springframework.web.bind.annotation.RestController; @RestController public class DemoApplication { - private static final Counter requestCount = Counter.builder() - .name("requests_total") - .register(); - - public static void main(String[] args) { - SpringApplication.run(DemoApplication.class, args); - JvmMetrics.builder().register(); - } - - @GetMapping("/") - public String sayHello() throws InterruptedException { - requestCount.inc(); - return "Hello, World!\n"; - } - - @Bean - public ServletRegistrationBean createPrometheusMetricsEndpoint() { - return new ServletRegistrationBean<>(new PrometheusMetricsServlet(), "/metrics/*"); - } + private static final Counter requestCount = Counter.builder() + .name("requests_total") + .register(); + + public static void main(String[] args) { + SpringApplication.run(DemoApplication.class, args); + JvmMetrics.builder().register(); + } + + @GetMapping("/") + public String sayHello() throws InterruptedException { + requestCount.inc(); + return "Hello, World!\n"; + } + + @Bean + public ServletRegistrationBean createPrometheusMetricsEndpoint() { + return new ServletRegistrationBean<>(new PrometheusMetricsServlet(), "/metrics/*"); + } } ``` -The important part are the last three lines: They configure the [PrometheusMetricsServlet](/client_java/api/io/prometheus/metrics/exporter/servlet/jakarta/PrometheusMetricsServlet.html) to expose metrics on `/metrics`: +The important part are the last three lines: They configure the +[PrometheusMetricsServlet](/client_java/api/io/prometheus/metrics/exporter/servlet/jakarta/PrometheusMetricsServlet.html) +to expose 
metrics on `/metrics`: ```java + @Bean public ServletRegistrationBean createPrometheusMetricsEndpoint() { - return new ServletRegistrationBean<>(new PrometheusMetricsServlet(), "/metrics/*"); + return new ServletRegistrationBean<>(new PrometheusMetricsServlet(), "/metrics/*"); } ``` -The example provides a _Hello, world!_ endpoint on [http://localhost:8080](http://localhost:8080), and Prometheus metrics on [http://localhost:8080/metrics](http://localhost:8080/metrics). +The example provides a _Hello, world!_ endpoint on +[http://localhost:8080](http://localhost:8080), and Prometheus metrics on +[http://localhost:8080/metrics](http://localhost:8080/metrics). diff --git a/docs/content/exporters/unicode.md b/docs/content/exporters/unicode.md new file mode 100644 index 000000000..2a34e0400 --- /dev/null +++ b/docs/content/exporters/unicode.md @@ -0,0 +1,34 @@ +--- +title: Unicode +weight: 2 +--- + +{{< hint type=warning >}} +Unicode support is experimental, because [OpenMetrics specification](https://openmetrics.io/) is not +updated yet to support Unicode characters in metric and label names. +{{< /hint >}} + +The Prometheus Java client library allows all Unicode characters, that can be encoded as UTF-8. + +At scrape time, some characters are replaced based on the `encoding` header according +to +the [Escaping scheme](https://github.com/prometheus/docs/blob/main/docs/instrumenting/escaping_schemes.md). + +For example, if you use the `underscores` escaping scheme, dots in metric and label names are +replaced with underscores, so that the metric name `http.server.duration` becomes +`http_server_duration`. + +Prometheus servers that do not support Unicode at all will not pass the `encoding` header, and the +Prometheus Java client library will replace dots, as well as any character that is not in the legacy +character set (`a-zA-Z0-9_:`), with underscores by default. 
+When `escaping=allow-utf-8` is passed, valid UTF-8 characters in metric and label names are kept
+without being replaced. This allows you to use dots in metric and label names, as well as
@@ -35,15 +38,17 @@ cache_size_bytes{state="cold"} 78.0 cache_size_bytes{state="hot"} 83.0 ``` -Better examples of callback metrics can be found in the `prometheus-metrics-instrumentation-jvm` module. +Better examples of callback metrics can be found in the `prometheus-metrics-instrumentation-jvm` +module. The available callback metric types are: -* `GaugeWithCallback` for gauges. -* `CounterWithCallback` for counters. -* `SummaryWithCallback` for summaries. +- `GaugeWithCallback` for gauges. +- `CounterWithCallback` for counters. +- `SummaryWithCallback` for summaries. -The API for gauges and counters is very similar. For summaries the callback has a few more parameters, because it accepts a count, a sum, and quantiles: +The API for gauges and counters is very similar. For summaries the callback has a few more +parameters, because it accepts a count, a sum, and quantiles: ```java SummaryWithCallback.builder() diff --git a/docs/content/getting-started/labels.md b/docs/content/getting-started/labels.md index 5aeb5f215..d056a6ce6 100644 --- a/docs/content/getting-started/labels.md +++ b/docs/content/getting-started/labels.md @@ -5,7 +5,7 @@ weight: 3 The following shows an example of a Prometheus metric in text format: -``` +```text # HELP payments_total total number of payments # TYPE payments_total counter payments_total{status="error",type="paypal"} 1.0 @@ -13,12 +13,14 @@ payments_total{status="success",type="credit card"} 3.0 payments_total{status="success",type="paypal"} 2.0 ``` -The example shows a counter metric named `payments_total` with two labels: `status` and `type`. Each individual data point (each line in text format) is identified by the unique combination of its metric name and its label name/value pairs. +The example shows a counter metric named `payments_total` with two labels: `status` and `type`. +Each individual data point (each line in text format) is identified by the unique combination of +its metric name and its label name/value pairs. 
-Creating a Metric with Labels ------------------------------ +## Creating a Metric with Labels -Labels are supported for all metric types. We are using counters in this example, however the `labelNames()` and `labelValues()` methods are the same for other metric types. +Labels are supported for all metric types. We are using counters in this example, however the +`labelNames()` and `labelValues()` methods are the same for other metric types. The following code creates the counter above. @@ -34,10 +36,10 @@ counter.labelValues("paypal", "success").inc(2.0); counter.labelValues("paypal", "error").inc(1.0); ``` -The label names have to be specified when the metric is created and cannot change. The label values are created on demand when values are observed. +The label names have to be specified when the metric is created and cannot change. The label values +are created on demand when values are observed. -Creating a Metric without Labels --------------------------------- +## Creating a Metric without Labels Labels are optional. The following example shows a metric without labels: @@ -50,12 +52,13 @@ Counter counter = Counter.builder() counter.inc(3.0); ``` -Cardinality Explosion ---------------------- +## Cardinality Explosion -Each combination of label names and values will result in a new data point, i.e. a new line in text format. +Each combination of label names and values will result in a new data point, i.e. a new line in text +format. Therefore, a good label should have only a small number of possible values. -If you select labels with many possible values, like unique IDs or timestamps, you may end up with an enormous number of data points. +If you select labels with many possible values, like unique IDs or timestamps, +you may end up with an enormous number of data points. This is called cardinality explosion. 
Here's a bad example, don't do this: @@ -73,12 +76,12 @@ String timestamp = Long.toString(System.currentTimeMillis()); loginCount.labelValues(userId, timestamp).inc(); ``` -Initializing Label Values -------------------------- +## Initializing Label Values If you register a metric without labels, it will show up immediately with initial value of zero. -However, metrics with labels only show up after the label values are first used. In the example above +However, metrics with labels only show up after the label values are first used. In the example +above ```java counter.labelValues("paypal", "error").inc(); @@ -86,13 +89,14 @@ counter.labelValues("paypal", "error").inc(); The data point -``` +```text payments_total{status="error",type="paypal"} 1.0 ``` will jump from non-existent to value 1.0. You will never see it with value 0.0. -This is usually not an issue. However, if you find this annoying and want to see all possible label values from the start, you can initialize label values with `initLabelValues()` like this: +This is usually not an issue. However, if you find this annoying and want to see all possible label +values from the start, you can initialize label values with `initLabelValues()` like this: ```java Counter counter = Counter.builder() @@ -109,7 +113,7 @@ counter.initLabelValues("paypal", "error"); Now the four combinations will be visible from the start with initial value zero. -``` +```text # HELP payments_total total number of payments # TYPE payments_total counter payments_total{status="error",type="credit card"} 0.0 @@ -118,10 +122,10 @@ payments_total{status="success",type="credit card"} 0.0 payments_total{status="success",type="paypal"} 0.0 ``` -Expiring Unused Label Values ----------------------------- +## Expiring Unused Label Values -There is no automatic expiry of unused label values (yet). Once a set of label values is used, it will remain there forever. +There is no automatic expiry of unused label values (yet). 
Once a set of label values is used, it +will remain there forever. However, you can programmatically remove label values like this: @@ -130,8 +134,7 @@ counter.remove("paypal", "error"); counter.remove("paypal", "success"); ``` -Const Labels ------------- +## Const Labels If you have labels values that never change, you can specify them in the builder as `constLabels()`: @@ -144,6 +147,7 @@ Counter counter = Counter.builder() .register(); ``` -However, most use cases for `constLabels()` are better covered by target labels set by the scraping Prometheus server, +However, most use cases for `constLabels()` are better covered by target labels set by the scraping +Prometheus server, or by one specific metric (e.g. a `build_info` or a `machine_role` metric). See also -[target labels, not static scraped labels](https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels). +[target labels, not static scraped labels](https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels). diff --git a/docs/content/getting-started/metric-types.md b/docs/content/getting-started/metric-types.md index e10d51e9a..97424ef5c 100644 --- a/docs/content/getting-started/metric-types.md +++ b/docs/content/getting-started/metric-types.md @@ -1,19 +1,25 @@ --- -title: 'Metric Types' +title: "Metric Types" weight: 4 --- -The Prometheus Java metrics library implements the metric types defined in the [OpenMetrics](https://openmetrics.io) standard: +The Prometheus Java metrics library implements the metric types defined in +the [OpenMetrics](https://openmetrics.io) standard: {{< toc >}} -Counter -------- +## Counter -Counter is the most common and useful metric type. Counters can only increase, but never decrease. In the Prometheus query language, the [rate()](https://prometheus.io/docs/prometheus/latest/querying/functions/#rate) function is often used for counters to calculate the average increase per second. 
+Counter is the most common and useful metric type. Counters can only increase, but never decrease. +In the Prometheus query language, +the [rate()](https://prometheus.io/docs/prometheus/latest/querying/functions/#rate) function is +often used for counters to calculate the average increase per second. {{< hint type=note >}} -Counter values do not need to be integers. In many cases counters represent a number of events (like the number of requests), and in that case the counter value is an integer. However, counters can also be used for something like "total time spent doing something" in which case the counter value is a floating point number. +Counter values do not need to be integers. In many cases counters represent a number of events (like +the number of requests), and in that case the counter value is an integer. However, counters can +also be used for something like "total time spent doing something" in which case the counter value +is a floating point number. {{< /hint >}} Here's an example of a counter: @@ -28,12 +34,14 @@ Counter serviceTimeSeconds = Counter.builder() serviceTimeSeconds.inc(Unit.millisToSeconds(200)); ``` -The resulting counter has the value `0.2`. As `SECONDS` is the standard time unit in Prometheus, the `Unit` utility class has methods to convert other time units to seconds. +The resulting counter has the value `0.2`. As `SECONDS` is the standard time unit in Prometheus, the +`Unit` utility class has methods to convert other time units to seconds. -As defined in [OpenMetrics](https://openmetrics.io/), counter metric names must have the `_total` suffix. If you create a counter without the `_total` suffix the suffix will be appended automatically. +As defined in [OpenMetrics](https://openmetrics.io/), counter metric names must have the `_total` +suffix. If you create a counter without the `_total` suffix the suffix will be appended +automatically. 
-Gauge ------ +## Gauge Gauges are current measurements, such as the current temperature in Celsius. @@ -48,27 +56,36 @@ Gauge temperature = Gauge.builder() temperature.labelValues("Berlin").set(22.3); ``` -Histogram ---------- +## Histogram -Histograms are for observing distributions, like latency distributions for HTTP services or the distribution of request sizes. -Unlike with counters and gauges, each histogram data point has a complex data structure representing different aspects of the distribution: +Histograms are for observing distributions, like latency distributions for HTTP services or the +distribution of request sizes. +Unlike with counters and gauges, each histogram data point has a complex data structure representing +different aspects of the distribution: -* Count: The total number of observations. -* Sum: The sum of all observed values, e.g. the total time spent serving requests. -* Buckets: The histogram buckets representing the distribution. +- Count: The total number of observations. +- Sum: The sum of all observed values, e.g. the total time spent serving requests. +- Buckets: The histogram buckets representing the distribution. Prometheus supports two flavors of histograms: -* Classic histograms: Bucket boundaries are explicitly defined when the histogram is created. -* Native histograms (exponential histograms): Infinitely many virtual buckets. +- Classic histograms: Bucket boundaries are explicitly defined when the histogram is created. +- Native histograms (exponential histograms): Infinitely many virtual buckets. -By default, histograms maintain both flavors. Which one is used depends on the scrape request from the Prometheus server. -* By default, the Prometheus server will scrape metrics in OpenMetrics format and get the classic histogram flavor. -* If the Prometheus server is started with `--enable-feature=native-histograms`, it will request metrics in Prometheus protobuf format and ingest the native histogram. 
-* If the Prometheus server is started with `--enable-feature=native-histogram` and the scrape config has the option `scrape_classic_histograms: true`, it will request metrics in Prometheus protobuf format and ingest both, the classic and the native flavor. This is great for migrating from classic histograms to native histograms. +By default, histograms maintain both flavors. Which one is used depends on the scrape request from +the Prometheus server. -See [examples/example-native-histogram](https://github.com/prometheus/client_java/tree/1.0.x/examples/example-native-histogram) for an example. +- By default, the Prometheus server will scrape metrics in OpenMetrics format and get the classic + histogram flavor. +- If the Prometheus server is started with `--enable-feature=native-histograms`, it will request + metrics in Prometheus protobuf format and ingest the native histogram. +- If the Prometheus server is started with `--enable-feature=native-histograms` and the scrape config + has the option `scrape_classic_histograms: true`, it will request metrics in Prometheus protobuf + format and ingest both the classic and the native flavor. This is great for migrating from + classic histograms to native histograms. + +See [examples/example-native-histogram](https://github.com/prometheus/client_java/tree/1.0.x/examples/example-native-histogram) +for an example. ```java Histogram duration = Histogram.builder() @@ -83,17 +100,118 @@ long start = System.nanoTime(); duration.labelValues("GET", "/", "200").observe(Unit.nanosToSeconds(System.nanoTime() - start)); ``` -Histograms implement the [TimerApi](/client_java/api/io/prometheus/metrics/core/datapoints/TimerApi.html) interface, which provides convenience methods for measuring durations. +Histograms implement +the [TimerApi](/client_java/api/io/prometheus/metrics/core/datapoints/TimerApi.html) interface, +which provides convenience methods for measuring durations.
+ +The histogram builder provides a lot of configuration for fine-tuning the histogram behavior. In +most cases you don't need them, defaults are good. The following is an incomplete list showing the +most important options: + +- `nativeOnly()` / `classicOnly()`: Create a histogram with one representation only. +- `classicUpperBounds(...)`: Set the classic bucket upper boundaries. Default bucket upper + boundaries are `.005`, `.01`, `.025`, `.05`, `.1`, `.25`, `.5`, `1`, `2.5`, `5`, and `10`. The + default bucket boundaries are designed for measuring request durations in seconds. +- `nativeMaxNumberOfBuckets()`: Upper limit for the number of native histogram buckets. + Default is 160. When the maximum is reached, the native histogram automatically + reduces resolution to stay below the limit. + +See Javadoc +for [Histogram.Builder](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html) +for a complete list of options. Some options can be configured at runtime, +see [config]({{< relref "../config/config.md" >}}). + +### Custom Bucket Boundaries + +The default bucket boundaries are designed for measuring request durations in seconds. For other +use cases, you may want to define custom bucket boundaries. The histogram builder provides three +methods for this: -The histogram builder provides a lot of configuration for fine-tuning the histogram behavior. In most cases you don't need them, defaults are good. The following is an incomplete list showing the most important options: +**1. Arbitrary Custom Boundaries** + +Use `classicUpperBounds(...)` to specify arbitrary bucket boundaries: + +```java +Histogram responseSize = Histogram.builder() + .name("http_response_size_bytes") + .help("HTTP response size in bytes") + .classicUpperBounds(100, 1000, 10000, 100000, 1000000) // bytes + .register(); +``` -* `nativeOnly()` / `classicOnly()`: Create a histogram with one representation only. -* `classicBuckets(...)`: Set the classic bucket boundaries.
Default buckets are `.005`, `.01`, `.025`, `.05`, `.1`, `.25`, `.5`, `1`, `2.5`, `5`, `and 10`. The default bucket boundaries are designed for measuring request durations in seconds. -* `nativeMaxNumberOfBuckets()`: Upper limit for the number of native histogram buckets. Default is 160. When the maximum is reached, the native histogram automatically reduces resolution to stay below the limit. +**2. Linear Boundaries** -See Javadoc for [Histogram.Builder](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html) for a complete list of options. Some options can be configured at runtime, see [config](../../config/config). +Use `classicLinearUpperBounds(start, width, count)` for equal-width buckets: + +```java +Histogram queueSize = Histogram.builder() + .name("queue_size") + .help("Number of items in queue") + .classicLinearUpperBounds(10, 10, 10) // 10, 20, 30, ..., 100 + .register(); +``` + +**3. Exponential Boundaries** + +Use `classicExponentialUpperBounds(start, factor, count)` for exponential growth: + +```java +Histogram dataSize = Histogram.builder() + .name("data_size_bytes") + .help("Data size in bytes") + .classicExponentialUpperBounds(100, 10, 5) // 100, 1k, 10k, 100k, 1M + .register(); +``` -Histograms and summaries are both used for observing distributions. Therefore, the both implement the `DistributionDataPoint` interface. Using the `DistributionDataPoint` interface directly gives you the option to switch between histograms and summaries later with minimal code changes. +### Native Histograms with Custom Buckets (NHCB) + +Prometheus supports a special mode called Native Histograms with Custom Buckets (NHCB) that uses +schema -53. In this mode, custom bucket boundaries from classic histograms are preserved when +converting to native histograms. + +The Java client library automatically supports NHCB: + +1. By default, histograms maintain both classic (with custom buckets) and native representations +2. 
The classic representation with custom buckets is exposed to Prometheus +3. Prometheus servers can convert these to NHCB upon ingestion when configured with the + `convert_classic_histograms_to_nhcb` scrape option + +Example: + +```java +// This histogram will work seamlessly with NHCB +Histogram apiLatency = Histogram.builder() + .name("api_request_duration_seconds") + .help("API request duration") + .classicUpperBounds(0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0) // custom boundaries + .register(); +``` + +On the Prometheus side, configure the scrape job: + +```yaml +scrape_configs: + - job_name: "my-app" + scrape_protocols: ["PrometheusProto"] + convert_classic_histograms_to_nhcb: true + static_configs: + - targets: ["localhost:9400"] +``` + +{{< hint type=note >}} +NHCB is useful when: + +- You need precise bucket boundaries for your specific use case +- You're migrating from classic histograms and want to preserve bucket boundaries +- Exponential bucketing from standard native histograms isn't a good fit for your distribution + {{< /hint >}} + +See [examples/example-custom-buckets](https://github.com/prometheus/client_java/tree/main/examples/example-custom-buckets) +for a complete example with Prometheus and Grafana. + +Histograms and summaries are both used for observing distributions. Therefore, they both implement +the `DistributionDataPoint` interface. Using the `DistributionDataPoint` interface directly gives +you the option to switch between histograms and summaries later with minimal code changes. Example of using the `DistributionDataPoint` interface for a histogram without labels: @@ -128,10 +246,10 @@ successfulEvents.observe(0.7); erroneousEvents.observe(0.2); ``` -Summary ------- +## Summary -Like histograms, summaries are for observing distributions.
Each summary data point has a count and +a sum like a histogram data point. However, rather than histogram buckets summaries maintain quantiles. ```java @@ -148,26 +266,37 @@ Summary requestLatency = Summary.builder() requestLatency.labelValues("ok").observe(2.7); ``` -The example above creates a summary with the 50th percentile (median), the 95th percentile, and the 99th percentile. Quantiles are optional, you can create a summary without quantiles if all you need is the count and the sum. +The example above creates a summary with the 50th percentile (median), the 95th percentile, and the +99th percentile. Quantiles are optional, you can create a summary without quantiles if all you need +is the count and the sum. {{< hint type=note >}} -The terms "percentile" and "quantile" mean the same thing. We use percentile when we express it as a number in [0, 100], and we use quantile when we express it as a number in [0.0, 1.0]. +The terms "percentile" and "quantile" mean the same thing. We use percentile when we express it as a +number in [0, 100], and we use quantile when we express it as a number in [0.0, 1.0]. {{< /hint >}} -The second parameter to `quantile()` is the maximum acceptable error. The call `.quantile(0.5, 0.01)` means that the actual quantile is somewhere in [0.49, 0.51]. Higher precision means higher memory usage. +The second parameter to `quantile()` is the maximum acceptable error. The call +`.quantile(0.5, 0.01)` means that the actual quantile is somewhere in [0.49, 0.51]. Higher precision +means higher memory usage. -The 0.0 quantile (min value) and the 1.0 quantile (max value) are special cases because you can get the precise values (error 0.0) with almost no memory overhead. +The 0.0 quantile (min value) and the 1.0 quantile (max value) are special cases because you can get +the precise values (error 0.0) with almost no memory overhead. -Quantile values are calculated based on a 5 minutes moving time window. 
The default time window can be changed with `maxAgeSeconds()` and `numberOfAgeBuckets()`. +Quantile values are calculated based on a 5 minutes moving time window. The default time window can +be changed with `maxAgeSeconds()` and `numberOfAgeBuckets()`. -Some options can be configured at runtime, see [config](../../config/config). +Some options can be configured at runtime, see [config]({{< relref "../config/config.md" >}}). -In general you should prefer histograms over summaries. The Prometheus query language has a function [histogram_quantile()](https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for calculating quantiles from histograms. The advantage of query-time quantile calculation is that you can aggregate histograms before calculating the quantile. With summaries you must use the quantile with all its labels as it is. +In general you should prefer histograms over summaries. The Prometheus query language has a +function [histogram_quantile()](https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) +for calculating quantiles from histograms. The advantage of query-time quantile calculation is that +you can aggregate histograms before calculating the quantile. With summaries you must use the +quantile with all its labels as it is. -Info ----- +## Info -Info metrics are used to expose textual information which should not change during process lifetime. The value of an Info metric is always `1`. +Info metrics are used to expose textual information which should not change during process lifetime. +The value of an Info metric is always `1`. 
```java Info info = Info.builder() @@ -185,20 +314,27 @@ info.setLabelValues(version, vendor, runtime); The info above looks as follows in OpenMetrics text format: -``` + + +```text # TYPE jvm_runtime info # HELP jvm_runtime JVM runtime info jvm_runtime_info{runtime="OpenJDK Runtime Environment",vendor="Oracle Corporation",version="1.8.0_382-b05"} 1 ``` -The example is taken from the `prometheus-metrics-instrumentation-jvm` module, so if you have `JvmMetrics` registered you should have a `jvm_runtime_info` metric out-of-the-box. + -As defined in [OpenMetrics](https://openmetrics.io/), info metric names must have the `_info` suffix. If you create a counter without the `_info` suffix the suffix will be appended automatically. +The example is taken from the `prometheus-metrics-instrumentation-jvm` module, so if you have +`JvmMetrics` registered you should have a `jvm_runtime_info` metric out-of-the-box. -StateSet -------- +As defined in [OpenMetrics](https://openmetrics.io/), info metric names must have the `_info` +suffix. If you create an info metric without the `_info` suffix the suffix will be appended +automatically. -StateSet are a niche metric type in the OpenMetrics standard that is rarely used. The main use case is to signal which feature flags are enabled. +## StateSet + +StateSet is a niche metric type in the OpenMetrics standard that is rarely used. The main use case +is to signal which feature flags are enabled.
```java StateSet stateSet = StateSet.builder() @@ -214,16 +350,20 @@ stateSet.labelValues("dev").setTrue("feature2"); The OpenMetrics text format looks like this: -``` +```text # TYPE feature_flags stateset # HELP feature_flags Feature flags feature_flags{env="dev",feature_flags="feature1"} 0 feature_flags{env="dev",feature_flags="feature2"} 1 ``` -GaugeHistogram and Unknown --------------------------- +## GaugeHistogram and Unknown -These types are defined in the [OpenMetrics](https://openmetrics.io/) standard but not implemented in the `prometheus-metrics-core` API. +These types are defined in the [OpenMetrics](https://openmetrics.io/) standard but not implemented +in the `prometheus-metrics-core` API. However, `prometheus-metrics-model` implements the underlying data model for these types. -To use these types, you need to implement your own `Collector` where the `collect()` method returns an `UnknownSnapshot` or a `HistogramSnapshot` with `.gaugeHistogram(true)`. +To use these types, you need to implement your own `Collector` where the `collect()` method returns +an `UnknownSnapshot` or a `HistogramSnapshot` with `.gaugeHistogram(true)`. +If your custom collector does not implement `getMetricType()` and `getLabelNames()`, ensure it does +not produce the same metric name and label set as another collector, or the exposition may contain +duplicate time series. diff --git a/docs/content/getting-started/multi-target.md b/docs/content/getting-started/multi-target.md index 6a1c0412c..16f85ac40 100644 --- a/docs/content/getting-started/multi-target.md +++ b/docs/content/getting-started/multi-target.md @@ -7,108 +7,118 @@ weight: 7 This is for the upcoming release 1.1.0. 
{{< /hint >}} -To support multi-target pattern you can create a custom collector overriding the purposed internal method in ExtendedMultiCollector +To support multi-target pattern you can create a custom collector overriding the purposed internal +method in ExtendedMultiCollector see SampleExtendedMultiCollector in io.prometheus.metrics.examples.httpserver + + ```java public class SampleExtendedMultiCollector extends ExtendedMultiCollector { - public SampleExtendedMultiCollector() { - super(); - } - - @Override - protected MetricSnapshots collectMetricSnapshots(PrometheusScrapeRequest scrapeRequest) { - - GaugeSnapshot.Builder gaugeBuilder = GaugeSnapshot.builder(); - gaugeBuilder.name("x_load").help("process load"); - - CounterSnapshot.Builder counterBuilder = CounterSnapshot.builder(); - counterBuilder.name(PrometheusNaming.sanitizeMetricName("x_calls_total")).help("invocations"); - - String[] targetNames = scrapeRequest.getParameterValues("target"); - String targetName; - String[] procs = scrapeRequest.getParameterValues("proc"); - if (targetNames == null || targetNames.length == 0) { - targetName = "defaultTarget"; - procs = null; //ignore procs param - } else { - targetName = targetNames[0]; - } - Builder counterDataPointBuilder = CounterSnapshot.CounterDataPointSnapshot.builder(); - io.prometheus.metrics.model.snapshots.GaugeSnapshot.GaugeDataPointSnapshot.Builder gaugeDataPointBuilder = GaugeSnapshot.GaugeDataPointSnapshot.builder(); - Labels lbls = Labels.of("target", targetName); - - if (procs == null || procs.length == 0) { - counterDataPointBuilder.labels(lbls.merge(Labels.of("proc", "defaultProc"))); - gaugeDataPointBuilder.labels(lbls.merge(Labels.of("proc", "defaultProc"))); - counterDataPointBuilder.value(70); - gaugeDataPointBuilder.value(Math.random()); - - counterBuilder.dataPoint(counterDataPointBuilder.build()); - gaugeBuilder.dataPoint(gaugeDataPointBuilder.build()); - - } else { - for (int i = 0; i < procs.length; i++) { - 
counterDataPointBuilder.labels(lbls.merge(Labels.of("proc", procs[i]))); - gaugeDataPointBuilder.labels(lbls.merge(Labels.of("proc", procs[i]))); - counterDataPointBuilder.value(Math.random()); - gaugeDataPointBuilder.value(Math.random()); - - counterBuilder.dataPoint(counterDataPointBuilder.build()); - gaugeBuilder.dataPoint(gaugeDataPointBuilder.build()); - } - } - Collection snaps = new ArrayList(); - snaps.add(counterBuilder.build()); - snaps.add(gaugeBuilder.build()); - MetricSnapshots msnaps = new MetricSnapshots(snaps); - return msnaps; - } - - public List getPrometheusNames() { - List names = new ArrayList(); - names.add("x_calls_total"); - names.add("x_load"); - return names; - } + public SampleExtendedMultiCollector() { + super(); + } + + @Override + protected MetricSnapshots collectMetricSnapshots(PrometheusScrapeRequest scrapeRequest) { + + GaugeSnapshot.Builder gaugeBuilder = GaugeSnapshot.builder(); + gaugeBuilder.name("x_load").help("process load"); + + CounterSnapshot.Builder counterBuilder = CounterSnapshot.builder(); + counterBuilder.name(PrometheusNaming.sanitizeMetricName("x_calls_total")).help("invocations"); + + String[] targetNames = scrapeRequest.getParameterValues("target"); + String targetName; + String[] procs = scrapeRequest.getParameterValues("proc"); + if (targetNames == null || targetNames.length == 0) { + targetName = "defaultTarget"; + procs = null; //ignore procs param + } else { + targetName = targetNames[0]; + } + Builder counterDataPointBuilder = CounterSnapshot.CounterDataPointSnapshot.builder(); + io.prometheus.metrics.model.snapshots.GaugeSnapshot.GaugeDataPointSnapshot.Builder gaugeDataPointBuilder = GaugeSnapshot.GaugeDataPointSnapshot.builder(); + Labels lbls = Labels.of("target", targetName); + + if (procs == null || procs.length == 0) { + counterDataPointBuilder.labels(lbls.merge(Labels.of("proc", "defaultProc"))); + gaugeDataPointBuilder.labels(lbls.merge(Labels.of("proc", "defaultProc"))); + 
counterDataPointBuilder.value(70); + gaugeDataPointBuilder.value(Math.random()); + + counterBuilder.dataPoint(counterDataPointBuilder.build()); + gaugeBuilder.dataPoint(gaugeDataPointBuilder.build()); + + } else { + for (int i = 0; i < procs.length; i++) { + counterDataPointBuilder.labels(lbls.merge(Labels.of("proc", procs[i]))); + gaugeDataPointBuilder.labels(lbls.merge(Labels.of("proc", procs[i]))); + counterDataPointBuilder.value(Math.random()); + gaugeDataPointBuilder.value(Math.random()); + + counterBuilder.dataPoint(counterDataPointBuilder.build()); + gaugeBuilder.dataPoint(gaugeDataPointBuilder.build()); + } + } + Collection snaps = new ArrayList(); + snaps.add(counterBuilder.build()); + snaps.add(gaugeBuilder.build()); + MetricSnapshots msnaps = new MetricSnapshots(snaps); + return msnaps; + } + + public List getPrometheusNames() { + List names = new ArrayList(); + names.add("x_calls_total"); + names.add("x_load"); + return names; + } } ``` -`PrometheusScrapeRequest` provides methods to access http-related infos from the request originally received by the endpoint + + + +`PrometheusScrapeRequest` provides methods to access http-related infos from the request originally +received by the endpoint ```java public interface PrometheusScrapeRequest { - String getRequestURI(); + String getRequestURI(); - String[] getParameterValues(String name); + String[] getParameterValues(String name); } ``` - Sample Prometheus scrape_config +```yaml +- job_name: "multi-target" + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. + params: + proc: [proc1, proc2] + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: localhost:9401 + static_configs: + - targets: ["target1", "target2"] ``` - - job_name: "multi-target" - - # metrics_path defaults to '/metrics' - # scheme defaults to 'http'. 
- params: - proc: [proc1, proc2] - relabel_configs: - - source_labels: [__address__] - target_label: __param_target - - source_labels: [__param_target] - target_label: instance - - target_label: __address__ - replacement: localhost:9401 - static_configs: - - targets: ["target1", "target2"] -``` + It's up to the specific MultiCollector implementation how to interpret the _target_ parameter. -It might be an explicit real target (i.e. via host name/ip address) or as an alias in some internal configuration. -The latter is more suitable when the MultiCollector implementation is a proxy (see https://github.com/prometheus/snmp_exporter) -In this case, invoking real target might require extra parameters (e.g. credentials) that might be complex to manage in Prometheus configuration -(not considering the case where the proxy might become an "open relay") \ No newline at end of file +It might be an explicit real target (i.e. via host name/ip address) or as an alias in some internal +configuration. +The latter is more suitable when the MultiCollector implementation is a proxy ( +see ) +In this case, invoking real target might require extra parameters (e.g. credentials) that might be +complex to manage in Prometheus configuration +(not considering the case where the proxy might become an "open relay") diff --git a/docs/content/getting-started/performance.md b/docs/content/getting-started/performance.md index 43e95c430..435f0d18a 100644 --- a/docs/content/getting-started/performance.md +++ b/docs/content/getting-started/performance.md @@ -5,10 +5,10 @@ weight: 6 This section has tips on how to use the Prometheus Java client in high performance applications. -Specify Label Values Only Once ------------------------------- +## Specify Label Values Only Once -For high performance applications, we recommend to specify label values only once, and then use the data point directly. 
+For high performance applications, we recommend to specify label values only once, and then use the +data point directly. This applies to all metric types. Let's use a counter as an example here: @@ -26,7 +26,8 @@ You could increment the counter above like this: requestCount.labelValue("/", "200").inc(); ``` -However, the line above does not only increment the counter, it also looks up the label values to find the right data point. +However, the line above does not only increment the counter, it also looks up the label values to +find the right data point. In high performance applications you can optimize this by looking up the data point only once: @@ -40,39 +41,48 @@ Now, you can increment the data point directly, which is a highly optimized oper successfulCalls.inc(); ``` -Enable Only One Histogram Representation ----------------------------------------- +## Enable Only One Histogram Representation -By default, histograms maintain two representations under the hood: The classic histogram representation with static buckets, and the native histogram representation with dynamic buckets. +By default, histograms maintain two representations under the hood: The classic histogram +representation with static buckets, and the native histogram representation with dynamic buckets. -While this default provides the flexibility to scrape different representations at runtime, it comes at a cost, because maintaining multiple representations causes performance overhead. +While this default provides the flexibility to scrape different representations at runtime, it comes +at a cost, because maintaining multiple representations causes performance overhead. -In performance critical applications we recommend to use either the classic representation or the native representation, but not both. +In performance critical applications we recommend to use either the classic representation or the +native representation, but not both. 
-You can either configure this in code for each histogram by calling [classicOnly()](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html#classicOnly()) or [nativeOnly()](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.Builder.html#nativeOnly()), or you use the corresponding [config options](../../config/config/). +You can either configure this in code for each histogram by +calling [classicOnly()]() +or [nativeOnly()](), +or you use the corresponding [config options]({{< relref "../config/config.md" >}}). One way to do this is with system properties in the command line when you start your application ```sh -java -Dio.prometheus.metrics.histogramClassicOnly=true my-app.jar +java -Dio.prometheus.metrics.histogram_classic_only=true my-app.jar ``` or ```sh -java -Dio.prometheus.metrics.histogramNativeOnly=true my-app.jar +java -Dio.prometheus.metrics.histogram_native_only=true my-app.jar ``` -If you don't want to add a command line parameter every time you start your application, you can add a `prometheus.properties` file to your classpath (put it in the `src/main/resources/` directory so that it gets packed into your JAR file). The `prometheus.properties` file should have the following line: +If you don't want to add a command line parameter every time you start your application, you can add +a `prometheus.properties` file to your classpath (put it in the `src/main/resources/` directory so +that it gets packed into your JAR file). The `prometheus.properties` file should have the following +line: ```properties -io.prometheus.metrics.histogramClassicOnly=true +io.prometheus.metrics.histogram_classic_only=true ``` or ```properties -io.prometheus.metrics.histogramNativeOnly=true +io.prometheus.metrics.histogram_native_only=true ``` -Future releases will add more configuration options, like support for configuration via environment variable`IO_PROMETHEUS_METRICS_HISTOGRAM_NATIVE_ONLY=true`. 
+Future releases will add more configuration options, like support for configuration via environment +variable`IO_PROMETHEUS_METRICS_HISTOGRAM_NATIVE_ONLY=true`. diff --git a/docs/content/getting-started/quickstart.md b/docs/content/getting-started/quickstart.md index 3bf5364d9..920d89b7f 100644 --- a/docs/content/getting-started/quickstart.md +++ b/docs/content/getting-started/quickstart.md @@ -1,50 +1,152 @@ --- title: Quickstart -weight: 1 +weight: 0 --- This tutorial shows the quickest way to get started with the Prometheus Java metrics library. -# Dependencies +{{< toc >}} + +## Dependencies We use the following dependencies: -* `prometheus-metrics-core` is the actual metrics library. -* `prometheus-metrics-instrumentation-jvm` provides out-of-the-box JVM metrics. -* `prometheus-metrics-exporter-httpserver` is a standalone HTTP server for exposing Prometheus metrics. -{{< tabs "uniqueid" >}} -{{< tab "Gradle" >}} -``` -implementation 'io.prometheus:prometheus-metrics-core:1.0.0' -implementation 'io.prometheus:prometheus-metrics-instrumentation-jvm:1.0.0' -implementation 'io.prometheus:prometheus-metrics-exporter-httpserver:1.0.0' +- `prometheus-metrics-core` is the actual metrics library. +- `prometheus-metrics-instrumentation-jvm` provides out-of-the-box JVM metrics. +- `prometheus-metrics-exporter-httpserver` is a standalone HTTP server for exposing Prometheus + metrics. 
+ {{< tabs "deps" >}} + {{< tab "Gradle" >}} + +```groovy +implementation 'io.prometheus:prometheus-metrics-core:$version' +implementation 'io.prometheus:prometheus-metrics-instrumentation-jvm:$version' +implementation 'io.prometheus:prometheus-metrics-exporter-httpserver:$version' ``` + {{< /tab >}} {{< tab "Maven" >}} + ```xml io.prometheus prometheus-metrics-core - 1.0.0 + $version io.prometheus prometheus-metrics-instrumentation-jvm - 1.0.0 + $version io.prometheus prometheus-metrics-exporter-httpserver - 1.0.0 + $version ``` + {{< /tab >}} {{< /tabs >}} -There are alternative exporters as well, for example if you are using a Servlet container like Tomcat or Undertow you might want to use `prometheus-exporter-servlet-jakarta` rather than a standalone HTTP server. +There are alternative exporters as well, for example if you are using a Servlet container like +Tomcat or Undertow you might want to use `prometheus-exporter-servlet-jakarta` rather than a +standalone HTTP server. +{{< hint type=note >}} -# Example Application +If you do not use the protobuf exposition format, you can +[exclude]({{< relref "../exporters/formats.md#exclude-protobuf-exposition-format" >}}) +it from the dependencies. + +{{< /hint >}} + +## Dependency management + +A Bill of Material +([BOM](https://maven.apache.org/guides/introduction/introduction-to-dependency-mechanism.html#bill-of-materials-bom-poms)) +ensures that versions of dependencies (including transitive ones) are aligned. +This is especially important when using Spring Boot, which manages some of the dependencies of the +project. + +You should omit the version number of the dependencies in your build file if you are using a BOM. + +{{< tabs "bom" >}} +{{< tab "Gradle" >}} + +You have two ways to import a BOM. 
+
+First, you can use Gradle’s native BOM support by adding `dependencies`:
+
+```kotlin
+import org.springframework.boot.gradle.plugin.SpringBootPlugin
+
+plugins {
+  id("java")
+  id("org.springframework.boot") version "3.2.0" // if you are using Spring Boot
+}
+
+dependencies {
+  implementation(platform(SpringBootPlugin.BOM_COORDINATES)) // if you are using Spring Boot
+  implementation(platform("io.prometheus:prometheus-metrics-bom:$version"))
+}
+```
+
+The other way with Gradle is to use `dependencyManagement`:
+
+```kotlin
+plugins {
+  id("java")
+  id("org.springframework.boot") version "3.2.0" // if you are using Spring Boot
+  id("io.spring.dependency-management") version "1.1.0" // if you are using Spring Boot
+}
+
+dependencyManagement {
+  imports {
+    mavenBom("io.prometheus:prometheus-metrics-bom:$version")
+  }
+}
+```
+
+{{< hint type=note >}}
+
+Be careful not to mix up the different ways of configuring things with Gradle.
+For example, don't use
+`implementation(platform("io.prometheus:prometheus-metrics-bom:$version"))`
+with the `io.spring.dependency-management` plugin.
+
+{{< /hint >}}
+
+{{< /tab >}}
+{{< tab "Maven" >}}
+
+{{< hint type=note >}}
+
+Import the Prometheus Java metrics BOMs before any other BOMs in your
+project. For example, if you import the `spring-boot-dependencies` BOM, you have
+to declare it after the Prometheus Java metrics BOMs.
+ +{{< /hint >}} + +The following example shows how to import the Prometheus Java metrics BOMs using Maven: + +```xml + + + + io.prometheus + prometheus-metrics-bom + $version + pom + import + + + +``` + +{{< /tab >}} +{{< /tabs >}} + +## Example Application ```java import io.prometheus.metrics.core.metrics.Counter; @@ -73,34 +175,38 @@ public class App { .port(9400) .buildAndStart(); - System.out.println("HTTPServer listening on port http://localhost:" + server.getPort() + "/metrics"); + System.out.println("HTTPServer listening on port http://localhost:" + + server.getPort() + "/metrics"); Thread.currentThread().join(); // sleep forever } } ``` -# Result +## Result -Run the application and view [http://localhost:9400/metrics](http://localhost:9400/metrics) with your browser to see the raw metrics. You should see the `my_count_total` metric as shown below plus the `jvm_` and `process_` metrics coming from `JvmMetrics`. +Run the application and view [http://localhost:9400/metrics](http://localhost:9400/metrics) with +your browser to see the raw metrics. You should see the `my_count_total` metric as shown below plus +the `jvm_` and `process_` metrics coming from `JvmMetrics`. 
-``` +```text # HELP my_count_total example counter # TYPE my_count_total counter my_count_total{status="error"} 1.0 my_count_total{status="ok"} 2.0 ``` -# Prometheus Configuration +## Prometheus Configuration -To scrape the metrics with a Prometheus server, download the latest Prometheus server [release](https://github.com/prometheus/prometheus/releases), and configure the `prometheus.yml` file as follows: +To scrape the metrics with a Prometheus server, download the latest Prometheus +server [release](https://github.com/prometheus/prometheus/releases), and configure the +`prometheus.yml` file as follows: ```yaml global: scrape_interval: 10s # short interval for manual testing scrape_configs: - - job_name: "java-example" static_configs: - targets: ["localhost:9400"] diff --git a/docs/content/getting-started/registry.md b/docs/content/getting-started/registry.md index 7ae1bcbff..7f561ecef 100644 --- a/docs/content/getting-started/registry.md +++ b/docs/content/getting-started/registry.md @@ -3,10 +3,10 @@ title: Registry weight: 2 --- -In order to expose metrics, you need to register them with a `PrometheusRegistry`. We are using a counter as an example here, but the `register()` method is the same for all metric types. +In order to expose metrics, you need to register them with a `PrometheusRegistry`. We are using a +counter as an example here, but the `register()` method is the same for all metric types. -Registering a Metrics with the Default Registry ------------------------------------------------ +## Registering a Metric with the Default Registry ```java Counter eventsTotal = Counter.builder() @@ -15,10 +15,10 @@ Counter eventsTotal = Counter.builder() .register(); // <-- implicitly uses PrometheusRegistry.defaultRegistry ``` -The `register()` call above builds the counter and registers it with the global static `PrometheusRegistry.defaultRegistry`. Using the default registry is recommended. 
+The `register()` call above builds the counter and registers it with the global static +`PrometheusRegistry.defaultRegistry`. Using the default registry is recommended. -Registering a Metrics with a Custom Registry --------------------------------------------- +## Registering a Metric with a Custom Registry You can also register your metric with a custom registry: @@ -31,10 +31,10 @@ Counter eventsTotal = Counter.builder() .register(myRegistry); ``` -Registering a Metric with Multiple Registries ---------------------------------------------- +## Registering a Metric with Multiple Registries -As an alternative to calling `register()` directly, you can `build()` metrics without registering them, +As an alternative to calling `register()` directly, you can `build()` metrics without registering +them, and register them later: ```java @@ -51,7 +51,7 @@ Counter eventsTotal = Counter.builder() PrometheusRegistry.defaultRegistry.register(eventsTotal); // register the counter with a custom registry. -// This is ok, you can register a metric with multiple registries. +// This is OK, you can register a metric with multiple registries. PrometheusRegistry myRegistry = new PrometheusRegistry(); myRegistry.register(eventsTotal); @@ -60,10 +60,10 @@ myRegistry.register(eventsTotal); Custom registries are useful if you want to maintain different scopes of metrics, like a debug registry with a lot of metrics, and a default registry with only a few metrics. -IllegalArgumentException: Duplicate Metric Name in Registry ------------------------------------------------------------ +## IllegalArgumentException: Duplicate Metric Name in Registry -While it is ok to register the same metric with multiple registries, it is illegal to register the same metric name multiple times with the same registry. +While it is OK to register the same metric with multiple registries, it is illegal to register the +same metric name multiple times with the same registry. 
The following code will throw an `IllegalArgumentException`: ```java @@ -75,15 +75,33 @@ Counter eventsTotal1 = Counter.builder() Counter eventsTotal2 = Counter.builder() .name("events_total") .help("Total number of events") - .register(); // <-- IllegalArgumentException, because a metric with that name is already registered + .register(); // IllegalArgumentException, because a metric with that name is already registered ``` -Unregistering a Metric ----------------------- +## Validation at registration only -There is no automatic expiry of unused metrics (yet), once a metric is registered it will remain registered forever. +Validation of duplicate metric names and label schemas happens at registration time only. +Built-in metrics (Counter, Gauge, Histogram, etc.) participate in this validation. -However, you can programmatically unregistered an obsolete metric like this: +Custom collectors that implement the `Collector` or `MultiCollector` interface can optionally +implement `getPrometheusName()` and `getMetricType()` (and the MultiCollector per-name variants) so +the registry can enforce consistency. **Validation is skipped when metric name or type is +unavailable:** if `getPrometheusName()` or `getMetricType()` returns `null`, the registry does not +validate that collector. If two such collectors produce the same metric name and same label set at +scrape time, the exposition output may contain duplicate time series and be invalid for Prometheus. + +When validation _is_ performed (name and type are non-null), **null label names are treated as an +empty label schema:** `getLabelNames()` returning `null` is normalized to `Collections.emptySet()` +and full label-schema validation and duplicate detection still apply. A collector that returns a +non-null type but leaves `getLabelNames()` as `null` is still validated, with its labels treated as +empty. 
+ +## Unregistering a Metric + +There is no automatic expiry of unused metrics (yet), once a metric is registered it will remain +registered forever. + +However, you can programmatically unregister an obsolete metric like this: ```java PrometheusRegistry.defaultRegistry.unregister(eventsTotal); diff --git a/docs/content/instrumentation/caffeine.md b/docs/content/instrumentation/caffeine.md new file mode 100644 index 000000000..104a9b9fa --- /dev/null +++ b/docs/content/instrumentation/caffeine.md @@ -0,0 +1,128 @@ +--- +title: Caffeine Cache +weight: 1 +--- + +The Caffeine instrumentation module, added in version 1.3.2, translates observability data +provided by caffeine `Cache` objects into prometheus metrics. + +{{< tabs "uniqueid" >}} +{{< tab "Gradle" >}} + +```groovy +implementation 'io.prometheus:prometheus-metrics-instrumentation-caffeine:1.3.2' +``` + +{{< /tab >}} +{{< tab "Maven" >}} + +```xml + + io.prometheus + prometheus-metrics-instrumentation-caffeine + 1.3.2 + +``` + +{{< /tab >}} +{{< /tabs >}} + +In order to collect metrics: + +- A single `CacheMetricsCollector` instance must be registered with the registry; + - Multiple `CacheMetricsCollector` instances cannot be registered with the same registry; +- The `Cache` object must be instantiated with the `recordStats()` option, and then added to the + `CacheMetricsCollector` instance with a unique name, which will be used as the value of the + `cache` label on the exported metrics; + - If the `recordStats` option is not set, most metrics will only return zero values; + +```java +var cache = Caffeine.newBuilder().recordStats().build(); +var cacheMetrics = CacheMetricsCollector.builder().build(); +PrometheusRegistry.defaultRegistry.register(cacheMetrics); +cacheMetrics.addCache("mycache", cache); +``` + +{{< hint type=note >}} + +In version 1.3.5 and older of the caffeine instrumentation library, `CacheMetricsCollector.builder` +does not exist, i.e. 
a constructor call `new CacheMetricsCollector()` is the only option. + +{{< /hint >}} + +All example metrics on this page will use the `mycache` label value. + +## Generic Cache Metrics + +For all cache instances, the following metrics will be available: + +```text +# TYPE caffeine_cache_hit counter +# HELP caffeine_cache_hit Cache hit totals +caffeine_cache_hit_total{cache="mycache"} 10.0 +# TYPE caffeine_cache_miss counter +# HELP caffeine_cache_miss Cache miss totals +caffeine_cache_miss_total{cache="mycache"} 3.0 +# TYPE caffeine_cache_requests counter +# HELP caffeine_cache_requests Cache request totals, hits + misses +caffeine_cache_requests_total{cache="mycache"} 13.0 +# TYPE caffeine_cache_eviction counter +# HELP caffeine_cache_eviction Cache eviction totals, doesn't include manually removed entries +caffeine_cache_eviction_total{cache="mycache"} 1.0 +# TYPE caffeine_cache_estimated_size +# HELP caffeine_cache_estimated_size Estimated cache size +caffeine_cache_estimated_size{cache="mycache"} 5.0 +``` + +## Loading Cache Metrics + +If the cache is an instance of `LoadingCache`, i.e. 
it is built with a `loader` function that is +managed by the cache library, then metrics for observing load time and load failures become +available: + +```text +# TYPE caffeine_cache_load_failure counter +# HELP caffeine_cache_load_failure Cache load failures +caffeine_cache_load_failure_total{cache="mycache"} 10.0 +# TYPE caffeine_cache_loads counter +# HELP caffeine_cache_loads Cache loads: both success and failures +caffeine_cache_loads_total{cache="mycache"} 3.0 +# TYPE caffeine_cache_load_duration_seconds summary +# HELP caffeine_cache_load_duration_seconds Cache load duration: both success and failures +caffeine_cache_load_duration_seconds_count{cache="mycache"} 7.0 +caffeine_cache_load_duration_seconds_sum{cache="mycache"} 0.0034 +``` + +## Weighted Cache Metrics + +Two metrics exist for observability specifically of caches that define a `weigher`: + +```text +# TYPE caffeine_cache_eviction_weight counter +# HELP caffeine_cache_eviction_weight Weight of evicted cache entries, doesn't include manually removed entries // editorconfig-checker-disable-line + +caffeine_cache_eviction_weight_total{cache="mycache"} 5.0 +# TYPE caffeine_cache_weighted_size gauge +# HELP caffeine_cache_weighted_size Approximate accumulated weight of cache entries +caffeine_cache_weighted_size{cache="mycache"} 30.0 +``` + +{{< hint type=note >}} + +`caffeine_cache_weighted_size` is available only if the cache instance defines a `maximumWeight`. 
+ +{{< /hint >}} + +Up to version 1.3.5 and older, the weighted metrics had a different behavior: + +- `caffeine_cache_weighted_size` was not implemented; +- `caffeine_cache_eviction_weight` was exposed as a `gauge`; + +It is possible to restore the behavior of version 1.3.5 and older, by either: + +- Using the deprecated `new CacheMetricsCollector()` constructor; +- Using the flags provided on the `CacheMetricsCollector.Builder` object to opt-out of each of the + elements of the post-1.3.5 behavior: + - `builder.collectWeightedSize(false)` will disable collection of `caffeine_cache_weighted_size`; + - `builder.collectEvictionWeightAsCounter(false)` will expose `caffeine_cache_eviction_weight` as + a `gauge` metric; diff --git a/docs/content/instrumentation/guava.md b/docs/content/instrumentation/guava.md new file mode 100644 index 000000000..ffc8f0ab2 --- /dev/null +++ b/docs/content/instrumentation/guava.md @@ -0,0 +1,87 @@ +--- +title: Guava Cache +weight: 1 +--- + +The Guava instrumentation module, added in version 1.3.2, translates observability data +provided by Guava `Cache` objects into prometheus metrics. 
+ +{{< tabs "uniqueid" >}} +{{< tab "Gradle" >}} + +```groovy +implementation 'io.prometheus:prometheus-metrics-instrumentation-guava:1.3.2' +``` + +{{< /tab >}} +{{< tab "Maven" >}} + +```xml + + io.prometheus + prometheus-metrics-instrumentation-guava + 1.3.2 + +``` + +{{< /tab >}} +{{< /tabs >}} + +In order to collect metrics: + +- A single `CacheMetricsCollector` instance must be registered with the registry; + - Multiple `CacheMetricsCollector` instances cannot be registered with the same registry; +- The `Cache` object must be instantiated with the `recordStats()` option, and then added to the + `CacheMetricsCollector` instance with a unique name, which will be used as the value of the + `cache` label on the exported metrics; + - If the `recordStats` option is not set, most metrics will only return zero values; + +```java +var cache = CacheBuilder.newBuilder().recordStats().build(); +var cacheMetrics = new CacheMetricsCollector(); +PrometheusRegistry.defaultRegistry.register(cacheMetrics); +cacheMetrics.addCache("mycache", cache); +``` + +All example metrics on this page will use the `mycache` label value. + +## Generic Cache Metrics + +For all cache instances, the following metrics will be available: + +```text +# TYPE guava_cache_hit counter +# HELP guava_cache_hit Cache hit totals +guava_cache_hit_total{cache="mycache"} 10.0 +# TYPE guava_cache_miss counter +# HELP guava_cache_miss Cache miss totals +guava_cache_miss_total{cache="mycache"} 3.0 +# TYPE guava_cache_requests counter +# HELP guava_cache_requests Cache request totals +guava_cache_requests_total{cache="mycache"} 13.0 +# TYPE guava_cache_eviction counter +# HELP guava_cache_eviction Cache eviction totals, doesn't include manually removed entries +guava_cache_eviction_total{cache="mycache"} 1.0 +# TYPE guava_cache_size +# HELP guava_cache_size Cache size +guava_cache_size{cache="mycache"} 5.0 +``` + +## Loading Cache Metrics + +If the cache is an instance of `LoadingCache`, i.e. 
it is built with a `loader` function that is +managed by the cache library, then metrics for observing load time and load failures become +available: + +```text +# TYPE guava_cache_load_failure counter +# HELP guava_cache_load_failure Cache load failures +guava_cache_load_failure_total{cache="mycache"} 10.0 +# TYPE guava_cache_loads counter +# HELP guava_cache_loads Cache loads: both success and failures +guava_cache_loads_total{cache="mycache"} 3.0 +# TYPE guava_cache_load_duration_seconds summary +# HELP guava_cache_load_duration_seconds Cache load duration: both success and failures +guava_cache_load_duration_seconds_count{cache="mycache"} 7.0 +guava_cache_load_duration_seconds_sum{cache="mycache"} 0.0034 +``` diff --git a/docs/content/instrumentation/jvm.md b/docs/content/instrumentation/jvm.md index 4b6f90cc7..a9a15341f 100644 --- a/docs/content/instrumentation/jvm.md +++ b/docs/content/instrumentation/jvm.md @@ -3,15 +3,29 @@ title: JVM weight: 1 --- -The JVM instrumentation module provides a variety of out-of-the-box JVM and process metrics. To use it, add the following dependency: +{{< hint type=note >}} + +Looking for JVM metrics that follow OTel semantic +conventions? See +[OTel JVM Runtime Metrics]({{< relref "../otel/jvm-runtime-metrics.md" >}}) +for an alternative based on OpenTelemetry's +runtime-telemetry module. + +{{< /hint >}} + +The JVM instrumentation module provides a variety of out-of-the-box JVM and process metrics. 
To use +it, add the following dependency: {{< tabs "uniqueid" >}} {{< tab "Gradle" >}} -``` + +```groovy implementation 'io.prometheus:prometheus-metrics-instrumentation-jvm:1.0.0' ``` + {{< /tab >}} {{< tab "Maven" >}} + ```xml io.prometheus @@ -19,6 +33,7 @@ implementation 'io.prometheus:prometheus-metrics-instrumentation-jvm:1.0.0' 1.0.0 ``` + {{< /tab >}} {{< /tabs >}} @@ -28,16 +43,23 @@ Now, you can register the JVM metrics as follows: JvmMetrics.builder().register(); ``` -The line above will initialize all JVM metrics and register them with the default registry. If you want to register the metrics with a custom `PrometheusRegistry`, you can pass the registry as parameter to the `register()` call. +The line above will initialize all JVM metrics and register them with the default registry. If you +want to register the metrics with a custom `PrometheusRegistry`, you can pass the registry as +parameter to the `register()` call. -The sections below describe the individual classes providing JVM metrics. If you don't want to register all JVM metrics, you can register each of these classes individually rather than using `JvmMetrics`. +The sections below describe the individual classes providing JVM metrics. If you don't want to +register all JVM metrics, you can register each of these classes individually rather than using +`JvmMetrics`. -JVM Buffer Pool Metrics ------------------------ +## JVM Buffer Pool Metrics -JVM buffer pool metrics are provided by the [JvmBufferPoolMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmBufferPoolMetrics.html) class. The data is coming from the [BufferPoolMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/BufferPoolMXBean.html). Example metrics: +JVM buffer pool metrics are provided by +the [JvmBufferPoolMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmBufferPoolMetrics.html) +class. 
The data is coming from +the [BufferPoolMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/BufferPoolMXBean.html). +Example metrics: -``` +```text # HELP jvm_buffer_pool_capacity_bytes Bytes capacity of a given JVM buffer pool. # TYPE jvm_buffer_pool_capacity_bytes gauge jvm_buffer_pool_capacity_bytes{pool="direct"} 8192.0 @@ -52,12 +74,17 @@ jvm_buffer_pool_used_bytes{pool="direct"} 8192.0 jvm_buffer_pool_used_bytes{pool="mapped"} 0.0 ``` -JVM Class Loading Metrics -------------------------- +## JVM Class Loading Metrics -JVM class loading metrics are provided by the [JvmClassLoadingMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmClassLoadingMetrics.html) class. The data is coming from the [ClassLoadingMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/ClassLoadingMXBean.html). Example metrics: +JVM class loading metrics are provided by +the [JvmClassLoadingMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmClassLoadingMetrics.html) +class. The data is coming from +the [ClassLoadingMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/ClassLoadingMXBean.html). +Example metrics: -``` + + +```text # HELP jvm_classes_currently_loaded The number of classes that are currently loaded in the JVM # TYPE jvm_classes_currently_loaded gauge jvm_classes_currently_loaded 1109.0 @@ -69,23 +96,35 @@ jvm_classes_loaded_total 1109.0 jvm_classes_unloaded_total 0.0 ``` -JVM Compilation Metrics ------------------------ + -JVM compilation metrics are provided by the [JvmCompilationMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmCompilationMetrics.html) class. The data is coming from the [CompilationMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/CompilationMXBean.html). 
Example metrics: +## JVM Compilation Metrics -``` +JVM compilation metrics are provided by +the [JvmCompilationMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmCompilationMetrics.html) +class. The data is coming from +the [CompilationMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/CompilationMXBean.html). +Example metrics: + + + +```text # HELP jvm_compilation_time_seconds_total The total time in seconds taken for HotSpot class compilation # TYPE jvm_compilation_time_seconds_total counter jvm_compilation_time_seconds_total 0.152 ``` -JVM Garbage Collector Metrics ------------------------------ + -JVM garbage collector metrics are provided by the [JvmGarbageCollectorMetric](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmGarbageCollectorMetrics.html) class. The data is coming from the [GarbageCollectorMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/GarbageCollectorMXBean.html). Example metrics: +## JVM Garbage Collector Metrics -``` +JVM garbage collector metrics are provided by +the [JvmGarbageCollectorMetric](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmGarbageCollectorMetrics.html) +class. The data is coming from +the [GarbageCollectorMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/GarbageCollectorMXBean.html). +Example metrics: + +```text # HELP jvm_gc_collection_seconds Time spent in a given JVM garbage collector in seconds. # TYPE jvm_gc_collection_seconds summary jvm_gc_collection_seconds_count{gc="PS MarkSweep"} 0 @@ -94,12 +133,18 @@ jvm_gc_collection_seconds_count{gc="PS Scavenge"} 0 jvm_gc_collection_seconds_sum{gc="PS Scavenge"} 0.0 ``` -JVM Memory Metrics ------------------- +## JVM Memory Metrics -JVM memory metrics are provided by the [JvmMemoryMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmMemoryMetrics.html) class. 
The data is coming from the [MemoryMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/MemoryMXBean.html) and the [MemoryPoolMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html). Example metrics: +JVM memory metrics are provided by +the [JvmMemoryMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmMemoryMetrics.html) +class. The data is coming from +the [MemoryMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/MemoryMXBean.html) +and the [MemoryPoolMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html). +Example metrics: -``` + + +```text # HELP jvm_memory_committed_bytes Committed (bytes) of a given JVM memory area. # TYPE jvm_memory_committed_bytes gauge jvm_memory_committed_bytes{area="heap"} 4.98597888E8 @@ -173,12 +218,20 @@ jvm_memory_used_bytes{area="heap"} 9051232.0 jvm_memory_used_bytes{area="nonheap"} 1.1490688E7 ``` -JVM Memory Pool Allocation Metrics ----------------------------------- + -JVM memory pool allocation metrics are provided by the [JvmMemoryPoolAllocationMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmMemoryPoolAllocationMetrics.html) class. The data is obtained by adding a [NotificationListener](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/javax/management/NotificationListener.html) to the [GarbageCollectorMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/GarbageCollectorMXBean.html). Example metrics: +## JVM Memory Pool Allocation Metrics -``` +JVM memory pool allocation metrics are provided by +the [JvmMemoryPoolAllocationMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmMemoryPoolAllocationMetrics.html) +class. 
The data is obtained by adding +a [NotificationListener](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/javax/management/NotificationListener.html) +to the [GarbageCollectorMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/GarbageCollectorMXBean.html). +Example metrics: + + + +```text # HELP jvm_memory_pool_allocated_bytes_total Total bytes allocated in a given JVM memory pool. Only updated after GC, not continuously. # TYPE jvm_memory_pool_allocated_bytes_total counter jvm_memory_pool_allocated_bytes_total{pool="Code Cache"} 4336448.0 @@ -189,24 +242,36 @@ jvm_memory_pool_allocated_bytes_total{pool="PS Old Gen"} 1428888.0 jvm_memory_pool_allocated_bytes_total{pool="PS Survivor Space"} 4115280.0 ``` -JVM Runtime Info Metric ------------------------ + +## JVM Runtime Info Metric -The JVM runtime info metric is provided by the [JvmRuntimeInfoMetric](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmRuntimeInfoMetric.html) class. The data is obtained via system properties and will not change throughout the lifetime of the application. Example metric: +The JVM runtime info metric is provided by +the [JvmRuntimeInfoMetric](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmRuntimeInfoMetric.html) +class. The data is obtained via system properties and will not change throughout the lifetime of the +application. Example metric: -``` + + +```text # TYPE jvm_runtime info # HELP jvm_runtime JVM runtime info jvm_runtime_info{runtime="OpenJDK Runtime Environment",vendor="Oracle Corporation",version="1.8.0_382-b05"} 1 ``` -JVM Thread Metrics ------------------- + -JVM thread metrics are provided by the [JvmThreadsMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmThreadsMetrics.html) class. The data is coming from the [ThreadMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/ThreadMXBean.html). 
Example metrics: +## JVM Thread Metrics -``` +JVM thread metrics are provided by +the [JvmThreadsMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/JvmThreadsMetrics.html) +class. The data is coming from +the [ThreadMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/ThreadMXBean.html). +Example metrics: + + + +```text # HELP jvm_threads_current Current thread count of a JVM # TYPE jvm_threads_current gauge jvm_threads_current 10.0 @@ -236,12 +301,23 @@ jvm_threads_state{state="UNKNOWN"} 0.0 jvm_threads_state{state="WAITING"} 3.0 ``` -Process Metrics ---------------- + -Process metrics are provided by the [ProcessMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/ProcessMetrics.html) class. The data is coming from the [OperatingSystemMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/OperatingSystemMXBean.html), the [RuntimeMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/RuntimeMXBean.html), and from the `/proc/self/status` file on Linux. The metrics with prefix `process_` are not specific to Java, but should be provided by every Prometheus client library, see [Process Metrics](https://prometheus.io/docs/instrumenting/writing_clientlibs/#process-metrics) in the Prometheus [writing client libraries](https://prometheus.io/docs/instrumenting/writing_clientlibs/#process-metrics) documentation. Example metrics: +## Process Metrics -``` +Process metrics are provided by +the [ProcessMetrics](/client_java/api/io/prometheus/metrics/instrumentation/jvm/ProcessMetrics.html) +class. 
The data is coming from +the [OperatingSystemMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/OperatingSystemMXBean.html), +the [RuntimeMXBean](https://docs.oracle.com/en/java/javase/21/docs/api/java.management/java/lang/management/RuntimeMXBean.html), +and from the `/proc/self/status` file on Linux. The metrics with prefix `process_` are not specific +to Java, but should be provided by every Prometheus client library, +see [Process Metrics](https://prometheus.io/docs/instrumenting/writing_clientlibs/#process-metrics) +in the +Prometheus [writing client libraries](https://prometheus.io/docs/instrumenting/writing_clientlibs/#process-metrics) +documentation. Example metrics: + +```text # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter process_cpu_seconds_total 1.63 diff --git a/docs/content/internals/model.md b/docs/content/internals/model.md index e02517777..e1b2af644 100644 --- a/docs/content/internals/model.md +++ b/docs/content/internals/model.md @@ -7,25 +7,41 @@ The illustration below shows the internal architecture of the Prometheus Java cl ![Internal architecture of the Prometheus Java client library](/client_java/images/model.png) -prometheus-metrics-core ------------------------ +## prometheus-metrics-core -This is the user facing metrics library, implementing the core metric types, like [Counter](/client_java/api/io/prometheus/metrics/core/metrics/Counter.html), [Gauge](/client_java/api/io/prometheus/metrics/core/metrics/Gauge.html) [Histogram](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.html), and so on. 
+This is the user facing metrics library, implementing the core metric types, +like [Counter](/client_java/api/io/prometheus/metrics/core/metrics/Counter.html), +[Gauge](/client_java/api/io/prometheus/metrics/core/metrics/Gauge.html), +[Histogram](/client_java/api/io/prometheus/metrics/core/metrics/Histogram.html), +and so on. -All metric types implement the [Collector](/client_java/api/io/prometheus/metrics/model/registry/Collector.html) interface, i.e. they provide a [collect()](/client_java/api/io/prometheus/metrics/model/registry/Collector.html#collect()) method to produce snapshots. +All metric types implement +the [Collector](/client_java/api/io/prometheus/metrics/model/registry/Collector.html) interface, +i.e. they provide +a [collect()](/client_java/api/io/prometheus/metrics/model/registry/Collector.html#collect()) +method to produce snapshots. Implementers that do not provide metric type or label names (returning +null from `getMetricType()` and `getLabelNames()`) are not validated at registration; they must +avoid producing the same metric name and label schema as another collector, or exposition may be +invalid. -prometheus-metrics-model ------------------------- +## prometheus-metrics-model -The model is an internal library, implementing read-only immutable snapshots. These snapshots are returned by the [Collector.collect()](/client_java/api/io/prometheus/metrics/model/registry/Collector.html#collect()) method. +The model is an internal library, implementing read-only immutable snapshots. These snapshots are +returned by +the [Collector.collect()](/client_java/api/io/prometheus/metrics/model/registry/Collector.html#collect()) +method. -There is no need for users to use `prometheus-metrics-model` directly. Users should use the API provided by `prometheus-metrics-core`, which includes the core metrics as well as callback metrics. 
-However, maintainers of 3rd party metrics libraries might want to use `prometheus-metrics-model` if they want to add Prometheus exposition formats to their metrics library. +However, maintainers of third-party metrics libraries might want to use `prometheus-metrics-model` +if they want to add Prometheus exposition formats to their metrics library. -exporters and exposition formats --------------------------------- +## Exporters and exposition formats -The `prometheus-metrics-exposition-formats` module converts snapshots to Prometheus exposition formats, like text format, OpenMetrics text format, or Prometheus protobuf format. +The `prometheus-metrics-exposition-formats` module converts snapshots to Prometheus exposition +formats, like text format, OpenMetrics text format, or Prometheus protobuf format. -The exporters like `prometheus-metrics-exporter-httpserver` or `prometheus-metrics-exporter-servlet-jakarta` use this to convert snapshots into the right format depending on the `Accept` header in the scrape request. +The exporters like `prometheus-metrics-exporter-httpserver` or +`prometheus-metrics-exporter-servlet-jakarta` use this to convert snapshots into the right format +depending on the `Accept` header in the scrape request. diff --git a/docs/content/migration/simpleclient.md b/docs/content/migration/simpleclient.md index a954c9c04..6d0580571 100644 --- a/docs/content/migration/simpleclient.md +++ b/docs/content/migration/simpleclient.md @@ -3,34 +3,53 @@ title: Simpleclient weight: 1 --- -The Prometheus Java client library 1.0.0 is a complete rewrite of the underlying data model, and is not backwards compatible with releases 0.16.0 and older for a variety of reasons: - -* The old data model was based on [OpenMetrics](https://openmetrics.io). Native histograms don't fit with the OpenMetrics model because they don't follow the "every sample has exactly one double value" paradigm. 
It was a lot cleaner to implement a dedicated `prometheus-metrics-model` than trying to fit native histograms into the existing OpenMetrics-based model. -* Version 0.16.0 and older has multiple Maven modules sharing the same Java package name. This is not supported by the Java module system. To support users of Java modules, we renamed all packages and made sure no package is reused across multiple Maven modules. - -Migration using the Simpleclient Bridge ---------------------------------------- - -Good news: Users of version 0.16.0 and older do not need to refactor all their instrumentation code to get started with 1.0.0. - -We provide a migration module for bridging the old simpleclient `CollectorRegistry` to the new `PromethesuRegistry`. +The Prometheus Java client library 1.0.0 is a complete rewrite of the underlying data model, and is +not backward +compatible with releases 0.16.0 and older for a variety of reasons: + +- The old data model was based on [OpenMetrics](https://openmetrics.io). Native histograms don't fit + with the + OpenMetrics model because they don't follow the "every sample has exactly one double value" + paradigm. It was a lot + cleaner to implement a dedicated `prometheus-metrics-model` than trying to fit native histograms + into the existing + OpenMetrics-based model. +- Version 0.16.0 and older has multiple Maven modules sharing the same Java package name. This is + not supported by the + Java module system. To support users of Java modules, we renamed all packages and made sure no + package is reused + across multiple Maven modules. + +## Migration using the Simpleclient Bridge + +Good news: Users of version 0.16.0 and older do not need to refactor all their instrumentation code +to get started with +1.0.0. + +We provide a migration module for bridging the old simpleclient `CollectorRegistry` to the new +`PrometheusRegistry`. 
To use the bridge, add the following dependency: {{< tabs "uniqueid" >}} {{< tab "Gradle" >}} -``` + +```groovy implementation 'io.prometheus:prometheus-metrics-simpleclient-bridge:1.0.0' ``` + {{< /tab >}} {{< tab "Maven" >}} + ```xml + - io.prometheus - prometheus-metrics-simpleclient-bridge - 1.0.0 + io.prometheus + prometheus-metrics-simpleclient-bridge + 1.0.0 ``` + {{< /tab >}} {{< /tabs >}} @@ -40,7 +59,9 @@ Then add the following to your code: SimpleclientCollector.builder().register(); ``` -This will make all metrics registered with simpleclient's `CollectorRegistry.defaultRegistry` available in the new `PrometheusRegistry.defaultRegistry`. +This will make all metrics registered with simpleclient's `CollectorRegistry.defaultRegistry` +available in the new +`PrometheusRegistry.defaultRegistry`. If you are using custom registries, you can specify them like this: @@ -49,23 +70,27 @@ CollectorRegistry simpleclientRegistry = ...; PrometheusRegistry prometheusRegistry = ...; SimpleclientCollector.builder() - .collectorRegistry(simpleclientRegistry) - .register(prometheusRegistry); + .collectorRegistry(simpleclientRegistry) + .register(prometheusRegistry); ``` -Refactoring the Instrumentation Code ------------------------------------- +## Refactoring the Instrumentation Code -If you decide to get rid of the old 0.16.0 dependencies and use 1.0.0 only, you need to refactor your code: +If you decide to get rid of the old 0.16.0 dependencies and use 1.0.0 only, you need to refactor +your code: Dependencies: -* `simpleclient` -> `prometheus-metrics-core` -* `simpleclient_hotspot` -> `prometheus-metrics-instrumentation-jvm` -* `simpleclient_httpserver` -> `prometheus-metrics-exporter-httpserver` -* `simpleclient_servlet_jakarta` -> `prometheus-metrics-exporter-servlet-jakarta` +- `simpleclient` -> `prometheus-metrics-core` +- `simpleclient_hotspot` -> `prometheus-metrics-instrumentation-jvm` +- `simpleclient_httpserver` -> `prometheus-metrics-exporter-httpserver` +- 
`simpleclient_servlet_jakarta` -> `prometheus-metrics-exporter-servlet-jakarta` -As long as you are using high-level metric API like `Counter`, `Gauge`, `Histogram`, and `Summary` converting code to the new API is relatively straightforward. You will need to adapt the package name and apply some minor changes like using `builder()` instead of `build()` or using `labelValues()` instead of `labels()`. +As long as you are using high-level metric API like `Counter`, `Gauge`, `Histogram`, and `Summary` +converting code to +the new API is relatively straightforward. You will need to adapt the package name and apply some +minor changes like +using `builder()` instead of `build()` or using `labelValues()` instead of `labels()`. Example of the old 0.16.0 API: @@ -73,10 +98,10 @@ Example of the old 0.16.0 API: import io.prometheus.client.Counter; Counter counter = Counter.build() - .name("test") - .help("test counter") - .labelNames("path") - .register(); + .name("test") + .help("test counter") + .labelNames("path") + .register(); counter.labels("/hello-world").inc(); ``` @@ -87,14 +112,62 @@ Example of the new 1.0.0 API: import io.prometheus.metrics.core.metrics.Counter; Counter counter = Counter.builder() - .name("test") - .help("test counter") - .labelNames("path") - .register(); + .name("test") + .help("test counter") + .labelNames("path") + .register(); counter.labelValues("/hello-world").inc(); ``` -Reasons why we changed the API: Changing the package names was a necessity because the previous package names were incompatible with the Java module system. However, renaming packages requires changing code anyway, so we decided to clean up some things. For example, the name `builder()` for a builder method is very common in the Java ecosystem, it's used in Spring, Lombok, and so on. So naming the method `builder()` makes the Prometheus library more aligned with the broader Java ecosystem. 
+Reasons why we changed the API: Changing the package names was a necessity because the previous +package names were +incompatible with the Java module system. However, renaming packages requires changing code anyway, +so we decided to +clean up some things. For example, the name `builder()` for a builder method is very common in the +Java ecosystem, it's +used in Spring, Lombok, and so on. So naming the method `builder()` makes the Prometheus library +more aligned with the +broader Java ecosystem. + +If you are using the low level `Collector` API directly, you should have a look at the new callback +metric types, +see [/getting-started/callbacks/]({{< relref "../getting-started/callbacks.md" >}}). Chances are +good that the new callback metrics have +an easier way to achieve what you need than the old 0.16.0 code. + +## JVM Metrics + +Version 0.16.0 provided the `simpleclient_hotspot` module for exposing built-in JVM metrics: + +```java +DefaultExports.initialize(); +``` + +With version 1.0.0 these metrics moved to the `prometheus-metrics-instrumentation-jvm` module and +are initialized as follows: + +```java +JvmMetrics.builder().register(); +``` -If you are using the low level `Collector` API directly, you should have a look at the new callback metric types, see [/getting-started/callbacks/](../../getting-started/callbacks/). Chances are good that the new callback metrics have an easier way to achieve what you need than the old 0.16.0 code. +A full list of the available JVM metrics can be found +on [/instrumentation/jvm]({{< relref "../instrumentation/jvm.md" >}}). + +Most JVM metric names remained the same, except for a few cases where the old 0.16.0 metric names +were not compliant with the [OpenMetrics](https://openmetrics.io) specification. OpenMetrics +requires the unit to be a suffix, so we renamed metrics where the unit was in the middle of the +metric name and moved the unit to the end of the metric name. 
The following metric names changed: + +- `jvm_memory_bytes_committed` -> `jvm_memory_committed_bytes` +- `jvm_memory_bytes_init` -> `jvm_memory_init_bytes` +- `jvm_memory_bytes_max` -> `jvm_memory_max_bytes` +- `jvm_memory_pool_bytes_committed` -> `jvm_memory_pool_committed_bytes` +- `jvm_memory_pool_bytes_init` -> `jvm_memory_pool_init_bytes` +- `jvm_memory_pool_bytes_max` -> `jvm_memory_pool_max_bytes` +- `jvm_memory_pool_bytes_used` -> `jvm_memory_pool_used_bytes` +- `jvm_memory_pool_collection_bytes_committed` -> `jvm_memory_pool_collection_committed_bytes` +- `jvm_memory_pool_collection_bytes_init` -> `jvm_memory_pool_collection_init_bytes` +- `jvm_memory_pool_collection_bytes_max` -> `jvm_memory_pool_collection_max_bytes` +- `jvm_memory_pool_collection_bytes_used` -> `jvm_memory_pool_collection_used_bytes` +- `jvm_info` -> `jvm_runtime_info` diff --git a/docs/content/otel/jvm-runtime-metrics.md b/docs/content/otel/jvm-runtime-metrics.md new file mode 100644 index 000000000..d61da1861 --- /dev/null +++ b/docs/content/otel/jvm-runtime-metrics.md @@ -0,0 +1,241 @@ +--- +title: JVM Runtime Metrics +weight: 4 +--- + +OpenTelemetry's +[runtime-telemetry](https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/instrumentation/runtime-telemetry) +module is an alternative to +[prometheus-metrics-instrumentation-jvm]({{< relref "../instrumentation/jvm.md" >}}) +for users who want JVM metrics following OTel semantic conventions. + +Key advantages: + +- Metric names follow + [OTel semantic conventions](https://opentelemetry.io/docs/specs/semconv/runtime/jvm-metrics/) +- Java 17+ JFR support (context switches, network I/O, + lock contention, memory allocation) +- Alignment with the broader OTel ecosystem + +Since OpenTelemetry's `opentelemetry-exporter-prometheus` +already depends on this library's `PrometheusRegistry`, +no additional code is needed in this library — only the +OTel SDK wiring shown below. 
+ +## Dependencies + +Use the [OTel Support]({{< relref "support.md" >}}) module +to pull in the OTel SDK and Prometheus exporter, then add +the runtime-telemetry instrumentation: + +{{< tabs "jvm-runtime-deps" >}} +{{< tab "Gradle" >}} + +```groovy +implementation 'io.prometheus:prometheus-metrics-otel-support:$version' + +// Use opentelemetry-runtime-telemetry-java8 (Java 8+) +// or opentelemetry-runtime-telemetry-java17 (Java 17+, JFR-based) +implementation( + 'io.opentelemetry.instrumentation:opentelemetry-runtime-telemetry-java8:$otelVersion-alpha' +) +``` + +{{< /tab >}} +{{< tab "Maven" >}} + +```xml + + io.prometheus + prometheus-metrics-otel-support + $version + pom + + + + + + io.opentelemetry.instrumentation + opentelemetry-runtime-telemetry-java8 + $otelVersion-alpha + + + +``` + +{{< /tab >}} +{{< /tabs >}} + +## Standalone Setup + +If you **only** want OTel runtime metrics exposed as +Prometheus, without any Prometheus Java client metrics: + +```java +import io.opentelemetry.exporter.prometheus.PrometheusHttpServer; +import io.opentelemetry.instrumentation.runtimemetrics.java8.RuntimeMetrics; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; + +PrometheusHttpServer prometheusServer = + PrometheusHttpServer.builder() + .setPort(9464) + .build(); + +OpenTelemetrySdk openTelemetry = + OpenTelemetrySdk.builder() + .setMeterProvider( + SdkMeterProvider.builder() + .registerMetricReader(prometheusServer) + .build()) + .build(); + +RuntimeMetrics runtimeMetrics = + RuntimeMetrics.builder(openTelemetry).build(); + +// Close on shutdown to stop metric collection and server +Runtime.getRuntime().addShutdownHook(new Thread(() -> { + runtimeMetrics.close(); + prometheusServer.close(); +})); + +// Scrape at http://localhost:9464/metrics +``` + +## Combined with Prometheus Java Client Metrics + +If you already have Prometheus Java client metrics and want to +add OTel runtime metrics to the **same** `/metrics` 
+endpoint, use `PrometheusMetricReader` to bridge OTel +metrics into a `PrometheusRegistry`: + +```java +import io.prometheus.metrics.core.metrics.Counter; +import io.prometheus.metrics.exporter.httpserver.HTTPServer; +import io.prometheus.metrics.model.registry.PrometheusRegistry; +import io.opentelemetry.exporter.prometheus.PrometheusMetricReader; +import io.opentelemetry.instrumentation.runtimemetrics.java8.RuntimeMetrics; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; + +PrometheusRegistry registry = + new PrometheusRegistry(); + +// Register Prometheus metrics as usual +Counter myCounter = Counter.builder() + .name("my_requests_total") + .register(registry); + +// Bridge OTel metrics into the same registry +PrometheusMetricReader reader = + PrometheusMetricReader.create(); +registry.register(reader); + +OpenTelemetrySdk openTelemetry = + OpenTelemetrySdk.builder() + .setMeterProvider( + SdkMeterProvider.builder() + .registerMetricReader(reader) + .build()) + .build(); + +RuntimeMetrics runtimeMetrics = + RuntimeMetrics.builder(openTelemetry).build(); +Runtime.getRuntime() + .addShutdownHook(new Thread(runtimeMetrics::close)); + +// Expose everything on one endpoint +HTTPServer.builder() + .port(9400) + .registry(registry) + .buildAndStart(); +``` + +The [examples/example-otel-jvm-runtime-metrics](https://github.com/prometheus/client_java/tree/main/examples/example-otel-jvm-runtime-metrics) +directory has a complete runnable example. + +## Configuration + +The `RuntimeMetricsBuilder` supports two configuration +options: + +### `captureGcCause()` + +Adds a `jvm.gc.cause` attribute to the `jvm.gc.duration` +metric, indicating why the garbage collection occurred +(e.g. `G1 Evacuation Pause`, `System.gc()`): + +```java +RuntimeMetrics.builder(openTelemetry) + .captureGcCause() + .build(); +``` + +### `emitExperimentalTelemetry()` + +Enables additional experimental metrics beyond the stable +set. 
These are not yet part of the OTel semantic conventions +and may change in future releases: + +- Buffer pool metrics (direct and mapped byte buffers) +- Extended CPU metrics +- Extended memory pool metrics +- File descriptor metrics + +```java +RuntimeMetrics.builder(openTelemetry) + .emitExperimentalTelemetry() + .build(); +``` + +Both options can be combined: + +```java +RuntimeMetrics.builder(openTelemetry) + .captureGcCause() + .emitExperimentalTelemetry() + .build(); +``` + +Selective per-metric registration is not supported by the +runtime-telemetry API — it is all-or-nothing with these +two toggles. + +## Java 17 JFR Support + +The `opentelemetry-runtime-telemetry-java17` variant adds +JFR-based metrics. You can selectively enable features: + +```java +import io.opentelemetry.instrumentation.runtimemetrics.java17.JfrFeature; +import io.opentelemetry.instrumentation.runtimemetrics.java17.RuntimeMetrics; + +RuntimeMetrics.builder(openTelemetry) + .enableFeature(JfrFeature.BUFFER_METRICS) + .enableFeature(JfrFeature.NETWORK_IO_METRICS) + .enableFeature(JfrFeature.LOCK_METRICS) + .enableFeature(JfrFeature.CONTEXT_SWITCH_METRICS) + .build(); +``` + +## Metric Names + +OTel metric names are converted to Prometheus format by +the exporter. Examples: + +| OTel name | Prometheus name | +| ---------------------------- | ---------------------------------- | +| `jvm.memory.used` | `jvm_memory_used_bytes` | +| `jvm.gc.duration` | `jvm_gc_duration_seconds` | +| `jvm.thread.count` | `jvm_thread_count` | +| `jvm.class.loaded` | `jvm_class_loaded` | +| `jvm.cpu.recent_utilization` | `jvm_cpu_recent_utilization_ratio` | + +See [Names]({{< relref "names.md" >}}) for full details on +how OTel names map to Prometheus names. 
diff --git a/docs/content/otel/names.md b/docs/content/otel/names.md index 3d25dd3d4..a5425e07f 100644 --- a/docs/content/otel/names.md +++ b/docs/content/otel/names.md @@ -3,26 +3,34 @@ title: Names weight: 3 --- -OpenTelemetry naming conventions are different from Prometheus naming conventions. The mapping from OpenTelemetry metric names to Prometheus metric names is well defined in OpenTelemetry's [Prometheus and OpenMetrics Compatibility](https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/) spec, and the [OpenTelemetryExporter](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.html) implements that specification. +OpenTelemetry naming conventions are different from Prometheus naming conventions. The mapping from +OpenTelemetry metric names to Prometheus metric names is well defined in +OpenTelemetry's [Prometheus and OpenMetrics Compatibility](https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/) +spec, and +the [OpenTelemetryExporter](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.html) +implements that specification. -The goal is, if you set up a pipeline as illustrated below, you will see the same metric names in the Prometheus server as if you had exposed Prometheus metrics directly. +The goal is, if you set up a pipeline as illustrated below, you will see the same metric names in +the Prometheus server as if you had exposed Prometheus metrics directly. -![Image of a with the Prometheus client library pushing metrics to an OpenTelemetry collector](/client_java/images/otel-pipeline.png) +![Image of a with the Prometheus client library pushing metrics to an OpenTelemetry collector](/client_java/images/otel-pipeline.png) The main steps when converting OpenTelemetry metric names to Prometheus metric names are: -* Replace dots with underscores. -* If the metric has a unit, append the unit to the metric name, like `_seconds`. 
-* If the metric type has a suffix, append it, like `_total` for counters. +- Escape illegal characters as described in [Unicode support] +- If the metric has a unit, append the unit to the metric name, like `_seconds`. +- If the metric type has a suffix, append it, like `_total` for counters. -Dots in Metric and Label Names ------------------------------- +## Dots in Metric and Label Names -OpenTelemetry defines not only a line protocol, but also _semantic conventions_, i.e. standardized metric and label names. For example, OpenTelemetry's [Semantic Conventions for HTTP Metrics](https://opentelemetry.io/docs/specs/otel/metrics/semantic_conventions/http-metrics/) say that if you instrument an HTTP server with OpenTelemetry, you must have a histogram named `http.server.duration`. +OpenTelemetry defines not only a line protocol, but also _semantic conventions_, i.e. standardized +metric and label names. For example, +OpenTelemetry's [Semantic Conventions for HTTP Metrics](https://opentelemetry.io/docs/specs/otel/metrics/semantic_conventions/http-metrics/) +say that if you instrument an HTTP server with OpenTelemetry, you must have a histogram named +`http.server.duration`. -Most names defined in semantic conventions use dots. In the Prometheus server, the dot is an illegal character (this might change in future versions of the Prometheus server). +Most names defined in semantic conventions use dots. +Dots in metric and label names are now supported in the Prometheus Java client library as +described in [Unicode support]. -The Prometheus Java client library allows dots, so that you can use metric names and label names as defined in OpenTelemetry's semantic conventions. -The dots will automatically be replaced with underscores if you expose metrics in Prometheus format, but you will see the original names with dots if you push your metrics in OpenTelemetry format. 
- -That way, you can use OTel-compliant metric and label names today when instrumenting your application with the Prometheus Java client, and you are prepared in case your monitoring backend adds features in the future that require OTel-compliant instrumentation. +[Unicode support]: {{< relref "../exporters/unicode.md" >}} diff --git a/docs/content/otel/otlp.md b/docs/content/otel/otlp.md index ea39f178f..568219dd0 100644 --- a/docs/content/otel/otlp.md +++ b/docs/content/otel/otlp.md @@ -3,19 +3,23 @@ title: OTLP weight: 1 --- -The Prometheus Java client library allows you to push metrics to an OpenTelemetry endpoint using the OTLP protocol. +The Prometheus Java client library allows you to push metrics to an OpenTelemetry endpoint using the +OTLP protocol. -![Image of a with the Prometheus client library pushing metrics to an OpenTelemetry collector](/client_java/images/otel-pipeline.png) +![Image of a with the Prometheus client library pushing metrics to an OpenTelemetry collector](/client_java/images/otel-pipeline.png) To implement this, you need to include `prometheus-metrics-exporter` as a dependency {{< tabs "uniqueid" >}} {{< tab "Gradle" >}} -``` + +```groovy implementation 'io.prometheus:prometheus-metrics-exporter-opentelemetry:1.0.0' ``` + {{< /tab >}} {{< tab "Maven" >}} + ```xml io.prometheus @@ -23,6 +27,7 @@ implementation 'io.prometheus:prometheus-metrics-exporter-opentelemetry:1.0.0' 1.0.0 ``` + {{< /tab >}} {{< /tabs >}} @@ -34,8 +39,23 @@ OpenTelemetryExporter.builder() .buildAndStart(); ``` -By default, the `OpenTelemetryExporter` will push metrics every 60 seconds to `localhost:4317` using `grpc` protocol. You can configure this in code using the [OpenTelemetryExporter.Builder](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html), or at runtime via [`io.prometheus.exporter.opentelemetry.*`](../../config/config/#exporter-opentelemetry-properties) properties. 
- -In addition to the Prometheus Java client configuration, the exporter also recognizes standard OpenTelemetry configuration. For example, you can set the [OTEL_EXPORTER_OTLP_METRICS_ENDPOINT](https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/#otel_exporter_otlp_metrics_endpoint) environment variable to configure the endpoint. The Javadoc for [OpenTelemetryExporter.Builder](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html) shows which settings have corresponding OTel configuration. The intended use case is that if you attach the [OpenTelemetry Java agent](https://github.com/open-telemetry/opentelemetry-java-instrumentation/) for tracing, and use the Prometheus Java client for metrics, it is sufficient to configure the OTel agent because the Prometheus library will pick up the same configuration. - -The [examples/example-exporter-opentelemetry](https://github.com/prometheus/client_java/tree/main/examples/example-exporter-opentelemetry) folder has a docker compose with a complete end-to-end example, including a Java app, the OTel collector, and a Prometheus server. +By default, the `OpenTelemetryExporter` will push metrics every 60 seconds to `localhost:4317` using +`grpc` protocol. You can configure this in code using +the [OpenTelemetryExporter.Builder](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html), +or at runtime via [`io.prometheus.exporter.opentelemetry.*`]({{< relref "../config/config.md#exporter-opentelemetry-properties" >}}) +properties. + +In addition to the Prometheus Java client configuration, the exporter also recognizes standard +OpenTelemetry configuration. For example, you can set +the [OTEL_EXPORTER_OTLP_METRICS_ENDPOINT](https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/#otel_exporter_otlp_metrics_endpoint) +environment variable to configure the endpoint. 
The Javadoc +for [OpenTelemetryExporter.Builder](/client_java/api/io/prometheus/metrics/exporter/opentelemetry/OpenTelemetryExporter.Builder.html) +shows which settings have corresponding OTel configuration. The intended use case is that if you +attach the +[OpenTelemetry Java agent](https://github.com/open-telemetry/opentelemetry-java-instrumentation/) +for tracing, and use the Prometheus Java client for metrics, it is sufficient to configure the OTel +agent because the Prometheus library will pick up the same configuration. + +The [examples/example-exporter-opentelemetry](https://github.com/prometheus/client_java/tree/main/examples/example-exporter-opentelemetry) +folder has a Docker compose with a complete end-to-end example, including a Java app, the OTel +collector, and a Prometheus server. diff --git a/docs/content/otel/support.md b/docs/content/otel/support.md new file mode 100644 index 000000000..e3b8cbe3a --- /dev/null +++ b/docs/content/otel/support.md @@ -0,0 +1,47 @@ +--- +title: OTel Support +weight: 2 +--- + +The `prometheus-metrics-otel-support` module bundles the +OpenTelemetry SDK and the Prometheus exporter into a single +POM dependency. + +Use this module when you want to combine OpenTelemetry +instrumentations (e.g. JVM runtime metrics) with the +Prometheus Java client on one `/metrics` endpoint. + +## Dependencies + +{{< tabs "otel-support-deps" >}} +{{< tab "Gradle" >}} + +```groovy +implementation 'io.prometheus:prometheus-metrics-otel-support:$version' +``` + +{{< /tab >}} +{{< tab "Maven" >}} + +```xml + + io.prometheus + prometheus-metrics-otel-support + $version + pom + +``` + +{{< /tab >}} +{{< /tabs >}} + +This single dependency replaces: + +- `io.opentelemetry:opentelemetry-sdk` +- `io.opentelemetry:opentelemetry-exporter-prometheus` + +## Use Cases + +See [JVM Runtime Metrics]({{< relref "jvm-runtime-metrics.md" >}}) +for a concrete example of combining OTel JVM metrics with +the Prometheus Java client. 
diff --git a/docs/content/otel/tracing.md b/docs/content/otel/tracing.md index 406575a63..33180d34a 100644 --- a/docs/content/otel/tracing.md +++ b/docs/content/otel/tracing.md @@ -3,9 +3,21 @@ title: Tracing weight: 2 --- -OpenTelemetry’s [vision statement](https://github.com/open-telemetry/community/blob/main/mission-vision-values.md) says that [telemetry should be loosely coupled](https://github.com/open-telemetry/community/blob/main/mission-vision-values.md#telemetry-should-be-loosely-coupled), allowing end users to pick and choose from the pieces they want without having to bring in the rest of the project, too. In that spirit, you might choose to instrument your Java application with the Prometheus Java client library for metrics, and attach the [OpenTelemetry Java agent](https://github.com/open-telemetry/opentelemetry-java-instrumentation/) to get distributed tracing. +OpenTelemetry’s +[vision statement](https://github.com/open-telemetry/community/blob/main/mission-vision-values.md) +says that +[telemetry should be loosely coupled](https://github.com/open-telemetry/community/blob/main/mission-vision-values.md#telemetry-should-be-loosely-coupled), +allowing end users to pick and choose from the pieces they want without having to bring in the rest +of the project, too. In that spirit, you might choose to instrument your Java application with the +Prometheus Java client library for metrics, and attach the +[OpenTelemetry Java agent](https://github.com/open-telemetry/opentelemetry-java-instrumentation/) +to get distributed tracing. -First, if you attach the [OpenTelemetry Java agent](https://github.com/open-telemetry/opentelemetry-java-instrumentation/) you might want to turn off OTel's built-in metrics, because otherwise you get metrics from both the Prometheus Java client library and the OpenTelemetry agent (technically it's no problem to get both metrics, it's just not a common use case). 
+First, if you attach the +[OpenTelemetry Java agent](https://github.com/open-telemetry/opentelemetry-java-instrumentation/) +you might want to turn off OTel's built-in metrics, because otherwise you get metrics from both the +Prometheus Java client library and the OpenTelemetry agent (technically it's no problem to get both +metrics, it's just not a common use case). ```bash # This will tell the OpenTelemetry agent not to send metrics, just traces and logs. @@ -18,19 +30,34 @@ Now, start your application with the OpenTelemetry Java agent attached for trace java -javaagent:path/to/opentelemetry-javaagent.jar -jar myapp.jar ``` -With the OpenTelemetry Java agent attached, the Prometheus client library will do a lot of magic under the hood. +With the OpenTelemetry Java agent attached, the Prometheus client library will do a lot of magic +under the hood. -* `service.name` and `service.instance.id` are used in OpenTelemetry to uniquely identify a service instance. The Prometheus client library will automatically use the same `service.name` and `service.instance.id` as the agent when pushing metrics in OpenTelemetry format. That way the monitoring backend will see that the metrics and the traces are coming from the same instance. -* Exemplars are added automatically if a Prometheus metric is updated in the context of a distributed OpenTelemetry trace. -* If a Span is used as an Exemplar, the Span is marked with the Span attribute `exemplar="true"`. This can be used in the OpenTelemetry's sampling policy to make sure Exemplars are always sampled. +- `service.name` and `service.instance.id` are used in OpenTelemetry to uniquely identify a service + instance. The Prometheus client library will automatically use the same `service.name` and + `service.instance.id` as the agent when pushing metrics in OpenTelemetry format. That way the + monitoring backend will see that the metrics and the traces are coming from the same instance. 
+- Exemplars are added automatically if a Prometheus metric is updated in the context of a + distributed OpenTelemetry trace. +- If a Span is used as an Exemplar, the Span is marked with the Span attribute `exemplar="true"`. + This can be used in OpenTelemetry's sampling policy to make sure Exemplars are always sampled. -Here's more context on the `exemplar="true"` Span attribute: Many users of tracing libraries don't keep 100% of their trace data, because traces are very repetitive. It is very common to sample only 10% of traces and discard 90%. However, this can be an issue with Exemplars: In 90% of the cases Exemplars would point to a trace that has been thrown away. +Here's more context on the `exemplar="true"` Span attribute: Many users of tracing libraries don't +keep 100% of their trace data, because traces are very repetitive. It is very common to sample only +10% of traces and discard 90%. However, this can be an issue with Exemplars: In 90% of the cases +Exemplars would point to a trace that has been thrown away. -To solve this, the Prometheus Java client library annotates each Span that has been used as an Exemplar with the `exemplar="true"` Span attribute. +To solve this, the Prometheus Java client library annotates each Span that has been used as an +Exemplar with the `exemplar="true"` Span attribute. -The sampling policy in the OpenTelemetry collector can be configured to keep traces with this attribute. There's no risk that this results in a significant increase in trace data, because new Exemplars are only selected every [`minRetentionPeriodSeconds`](../../config/config/#exemplar-properties) seconds. +The sampling policy in the OpenTelemetry collector can be configured to keep traces with this +attribute. There's no risk that this results in a significant increase in trace data, because new +Exemplars are only selected every +[`minRetentionPeriodSeconds`]({{< relref "../config/config.md#exemplar-properties" >}}) seconds. 
-Here's an example of how to configure OpenTelemetry's [tail sampling processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/tailsamplingprocessor/) to sample all Spans marked with `exemplar="true"`, and then discard 90% of the traces: +Here's an example of how to configure OpenTelemetry's +[tail sampling processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/tailsamplingprocessor/) +to sample all Spans marked with `exemplar="true"`, and then discard 90% of the traces: ```yaml policies: @@ -38,14 +65,13 @@ policies: { name: keep-exemplars, type: string_attribute, - string_attribute: { key: "exemplar", values: [ "true" ] } - }, - { - name: keep-10-percent, - type: probabilistic, - probabilistic: { sampling_percentage: 10 } + string_attribute: { key: "exemplar", values: ["true"] }, }, + { name: keep-10-percent, type: probabilistic, probabilistic: { sampling_percentage: 10 } }, ] ``` -The [examples/example-exemplar-tail-sampling/](https://github.com/prometheus/client_java/tree/main/examples/example-exemplars-tail-sampling) directory has a complete end-to-end example, with a distributed Java application with two services, an OpenTelemetry collector, Prometheus, Tempo as a trace database, and Grafana dashboards. Use docker-compose as described in the example's README to run the example and explore the results. +The [examples/example-exemplar-tail-sampling/](https://github.com/prometheus/client_java/tree/main/examples/example-exemplars-tail-sampling) +directory has a complete end-to-end example, with a distributed Java application with two services, +an OpenTelemetry collector, Prometheus, Tempo as a trace database, and Grafana dashboards. Use +docker-compose as described in the example's readme to run the example and explore the results. 
diff --git a/docs/hugo.toml b/docs/hugo.toml index b453bcd2a..b558774ec 100644 --- a/docs/hugo.toml +++ b/docs/hugo.toml @@ -28,3 +28,7 @@ enableRobotsTXT = true [taxonomies] tag = "tags" + +[caches] + [caches.images] + dir = ':cacheDir/images' diff --git a/docs/static/custom.css b/docs/static/custom.css index 41ab7c562..ed919a35d 100644 --- a/docs/static/custom.css +++ b/docs/static/custom.css @@ -37,3 +37,7 @@ --footer-link-color-visited: #ffffff; } } + +.gdoc-markdown pre,.gdoc-markdown code { + overflow: auto; +} \ No newline at end of file diff --git a/docs/themes/hugo-geekdoc/data/assets.json b/docs/themes/hugo-geekdoc/data/assets.json index 9a34ed54d..81541fbbc 100644 --- a/docs/themes/hugo-geekdoc/data/assets.json +++ b/docs/themes/hugo-geekdoc/data/assets.json @@ -155,4 +155,4 @@ "src": "custom.css", "integrity": "sha512-1kALo+zc1L2u1rvyxPIew+ZDPWhnIA1Ei2rib3eHHbskQW+EMxfI9Ayyva4aV+YRrHvH0zFxvPSFIuZ3mfsbRA==" } -} \ No newline at end of file +} diff --git a/docs/themes/hugo-geekdoc/i18n/nl.yaml b/docs/themes/hugo-geekdoc/i18n/nl.yaml index 8e24d62a4..240bcea5a 100644 --- a/docs/themes/hugo-geekdoc/i18n/nl.yaml +++ b/docs/themes/hugo-geekdoc/i18n/nl.yaml @@ -36,7 +36,7 @@ posts_tagged_with: Alle berichten gemarkeerd met '{{ . }}' footer_build_with: > Gebouwd met Hugo en - + footer_legal_notice: Juridische mededeling footer_privacy_policy: Privacybeleid footer_content_license_prefix: > diff --git a/docs/themes/hugo-geekdoc/layouts/partials/language.html b/docs/themes/hugo-geekdoc/layouts/partials/language.html index fdcafd2b2..8ad972d07 100644 --- a/docs/themes/hugo-geekdoc/layouts/partials/language.html +++ b/docs/themes/hugo-geekdoc/layouts/partials/language.html @@ -1,4 +1,4 @@ -{{ if .Site.IsMultiLingual }} +{{ if .IsTranslated }}