diff --git a/_data/documentation/0_21_0.yaml b/_data/documentation/0_21_0.yaml
new file mode 100644
index 00000000..866f62ed
--- /dev/null
+++ b/_data/documentation/0_21_0.yaml
@@ -0,0 +1,115 @@
+docs:
+ - title: Proxy Quick Start
+ description: Start here if you're experimenting with the proxy for the first time.
+ tags:
+ - proxy
+ rank: '000'
+ path: html/proxy-quick-start
+ - title: Proxy Guide
+ description: "Using the Proxy, including configuration, security and operation."
+ tags:
+ - proxy
+ - security
+ rank: '010'
+ path: html/kroxylicious-proxy
+ - title: Record Encryption Quick Start
+ description: Start here for an encryption-at-rest solution for Apache Kafka®.
+ tags:
+ - security
+ - filter
+ rank: '011'
+ path: html/record-encryption-quick-start
+ - title: Kroxylicious Operator for Kubernetes
+ description: Using the Kroxylicious Operator to deploy and run the Proxy in a
+ Kubernetes environment.
+ tags:
+ - kubernetes
+ rank: '020'
+ path: html/kroxylicious-operator
+ - title: Record Encryption Guide
+ description: Using the record encryption filter to provide encryption-at-rest
+ for Apache Kafka®.
+ tags:
+ - security
+ - filter
+ rank: '020'
+ path: html/record-encryption-guide
+ - title: Kroxylicious Admission Webhook
+ description: Using the Kroxylicious Admission Webhook to inject proxy sidecars
+ into application pods in a Kubernetes environment.
+ tags:
+ - kubernetes
+ rank: '021'
+ path: html/admission-webhook-guide
+ - title: Record Validation Guide
+ description: "Using the record validation filter to ensure records follow certain\
+ \ rules, including schema and signature validity."
+ tags:
+ - governance
+ - filter
+ rank: '021'
+ path: html/record-validation-guide
+ - title: Multi-tenancy Guide
+ description: Using the multi-tenancy filter to present a single Kafka® cluster
+ as if it were multiple clusters.
+ tags:
+ - filter
+ rank: '022'
+ path: html/multi-tenancy-guide
+ - title: OAuth Bearer Validation Guide
+ description: Using the OAuth Bearer validation filter to validate JWT tokens
+ received from Kafka® clients during authentication.
+ tags:
+ - filter
+ - security
+ rank: '023'
+ path: html/oauth-bearer-validation
+ - title: SASL Inspection Guide
+ description: Using the SASL Inspection filter to infer the client's subject from
+ its successful authentication exchange with a broker.
+ tags:
+ - filter
+ - security
+ rank: '023'
+ path: html/sasl-inspection-guide
+ - title: Authorization Guide
+ description: Using the Authorization filter to provide Kafka®-equivalent access
+ controls within the proxy.
+ tags:
+ - security
+ - filter
+ rank: '024'
+ path: html/authorization-guide
+ - title: Entity Isolation Guide
+ description: Using the entity isolation filter to give authenticated Kafka® clients
+ a private namespace within a Kafka cluster.
+ tags:
+ - filter
+ rank: '025'
+ path: html/entity-isolation-guide
+ - title: Connection Expiration Guide
+ description: Using the connection expiration filter to avoid connection skew in
+ Kubernetes environments.
+ tags:
+ - kubernetes
+ - filter
+ rank: '030'
+ path: html/connection-expiration-guide
+ - title: Developer Quick Start
+ description: Start here if you're developing a filter for the first time.
+ tags:
+ - developer
+ rank: '031'
+ path: html/developer-quick-start
+ - title: Kroxylicious Developer Guide
+ description: Writing plugins for the proxy in the Java programming language.
+ tags:
+ - developer
+ rank: '032'
+ path: html/developer-guide
+ - title: Kroxylicious Javadocs
+ description: The Java API documentation for plugin developers.
+ tags:
+ - developer
+ path: javadoc/index.html
+ rank: '033'
diff --git a/_data/kroxylicious.yml b/_data/kroxylicious.yml
index 6d9009fb..bff96dba 100644
--- a/_data/kroxylicious.yml
+++ b/_data/kroxylicious.yml
@@ -1,3 +1,3 @@
# The version number of the latest release
-latestRelease: 0.20.0
+latestRelease: 0.21.0
diff --git a/_data/release/0_21_0.yaml b/_data/release/0_21_0.yaml
new file mode 100644
index 00000000..3ce8f100
--- /dev/null
+++ b/_data/release/0_21_0.yaml
@@ -0,0 +1,46 @@
+#
+# Copyright Kroxylicious Authors.
+#
+# Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
+#
+
+releaseNotesUrl: https://github.com/kroxylicious/kroxylicious/releases/tag/v$(VERSION)/
+assetBaseUrl: https://github.com/kroxylicious/kroxylicious/releases/download/v$(VERSION)/
+assets:
+ - name: Proxy
+ description: The proxy application.
+ downloads:
+ - format: zip
+ path: kroxylicious-app-$(VERSION)-bin.zip
+ - format: tar.gz
+ path: kroxylicious-app-$(VERSION)-bin.tar.gz
+ - name: Operator
+ description: The Kubernetes operator.
+ downloads:
+ - format: zip
+ path: kroxylicious-operator-$(VERSION).zip
+ - format: tar.gz
+ path: kroxylicious-operator-$(VERSION).tar.gz
+ - name: Webhook
+ description: The Kubernetes admission webhook.
+ downloads:
+ - format: zip
+ path: kroxylicious-admission-$(VERSION).zip
+ - format: tar.gz
+ path: kroxylicious-admission-$(VERSION).tar.gz
+images:
+ - name: Proxy
+ url: https://quay.io/repository/kroxylicious/proxy?tab=tags
+ registry: quay.io/kroxylicious/proxy
+ tag: $(VERSION)
+ digest: sha256:6bb6612d7f223eeee226fe656bbc5ebd9e41f03fec78e22df975ff810ea71c42
+ - name: Operator
+ url: https://quay.io/repository/kroxylicious/operator?tab=tags
+ registry: quay.io/kroxylicious/operator
+ tag: $(VERSION)
+ digest: sha256:5d87fa606143dbefc2327308a936617cee8e31b6b942026951939a309d55787a
+ - name: Webhook
+ url: https://quay.io/repository/kroxylicious/webhook?tab=tags
+ registry: quay.io/kroxylicious/webhook
+ tag: $(VERSION)
+ digest: sha256:b48b50754e6820b6d23a318a89d4f4daecf39c67db137cd86f5ee5a5326a58fb
\ No newline at end of file
diff --git a/documentation/0.21.0/html/admission-webhook-guide/content.html b/documentation/0.21.0/html/admission-webhook-guide/content.html
new file mode 100644
index 00000000..6f5fb557
--- /dev/null
+++ b/documentation/0.21.0/html/admission-webhook-guide/content.html
@@ -0,0 +1,613 @@
+{% raw %}
+
+
+
+
+
+
+
+
+
+
+
+
+
The Kroxylicious Admission Webhook is an early access feature and is not yet ready for general use.
+
+
+
The KroxyliciousSidecarConfig custom resource API (sidecar.kroxylicious.io/v1alpha1) is subject to change without notice in future releases. Do not rely on the stability of this API for production workloads.
+
+
+
+
+
+
+
About this guide
+
+
This guide covers installing and using the Kroxylicious Admission Webhook to inject Kroxylicious proxy sidecars into application pods on Kubernetes. For standalone proxy deployments using the Kroxylicious Operator, refer to the Kroxylicious Operator for Kubernetes guide.
+
+
+
+
+
1. Admission webhook overview
+
+
+
The Kroxylicious Admission Webhook is a Kubernetes mutating admission webhook that automatically injects a Kroxylicious proxy sidecar into application pods. The sidecar proxies Kafka traffic through localhost, allowing filters to be applied transparently without changes to application code.
+
+
+
1.1. How it works
+
+
When a pod is created in an enabled namespace, the webhook intercepts the admission request and mutates the pod specification to add:
+
+
+
+
+
A Kroxylicious proxy sidecar container
+
+
+
A volume projecting the proxy configuration from a pod annotation
+
+
+
The KAFKA_BOOTSTRAP_SERVERS environment variable on application containers, pointing them to the sidecar proxy on localhost
+
+
+
TLS trust anchor volumes, if TLS is configured for the target Kafka cluster
+
+
+
+
+
The sidecar forwards traffic to the target Kafka cluster specified in the KroxyliciousSidecarConfig custom resource.
+
+
+
+
1.2. Namespace opt-in
+
+
Sidecar injection is opt-in at the namespace level. The webhook only intercepts pod creation requests in namespaces that carry the label:
+
+
+
+
sidecar.kroxylicious.io/injection: enabled
+
+
+
+
Namespaces without this label are unaffected.
+
+
+
+
1.3. Pod opt-out
+
+
Individual pods can opt out of sidecar injection by setting the following label:
+
+
+
+
sidecar.kroxylicious.io/injection: disabled
+
+
+
+
Pods that already have a container named kroxylicious-proxy are also skipped to avoid double injection.
+
+
+
+
1.4. Failure policy
+
+
By default, the webhook uses fail-closed semantics. If the webhook encounters an internal error while processing an admission request, it rejects the pod. The MutatingWebhookConfiguration uses failurePolicy: Fail so that if the webhook is unavailable pod creation is blocked until the webhook becomes available again. This ensures that pods in injection-enabled namespaces are never created without the expected sidecar.
+
+
+
The failure policy is controlled in two places that must be kept in sync: the FAILURE_POLICY environment variable on the webhook Deployment, and the failurePolicy field in the MutatingWebhookConfiguration. To switch to fail-open semantics, set both to Ignore.
+
+
+
+
1.5. Trust model
+
+
The webhook enforces a trust boundary between two roles:
+
+
+
+
Webhook administrator
+
+
Controls what gets injected: the proxy image, filters, target Kafka cluster, and security context. Creates KroxyliciousSidecarConfig resources.
+
+
Application pod owner
+
+
Can opt out of injection via pod labels. Can select a specific KroxyliciousSidecarConfig by name via the sidecar.kroxylicious.io/config annotation. Cannot tamper with the proxy configuration, image, or security context.
+
+
+
+
+
The webhook always overwrites the sidecar.kroxylicious.io/proxy-config annotation, preventing application pod owners from pre-setting a malicious configuration.
+
+
+
+
1.6. Native sidecar support
+
+
On Kubernetes 1.29 and later (where the SidecarContainers feature gate is enabled by default), the webhook injects the sidecar as a native sidecar (an init container with restartPolicy: Always). This ensures proper startup ordering (the sidecar starts before application containers) and shutdown ordering (the sidecar stops after application containers). On older clusters, the sidecar is injected as a regular container. The webhook detects the cluster version automatically at startup.
+
+
+
For clusters running alpha versions of features (e.g. native sidecars on 1.28, OCI image volumes on 1.31-1.32), set the K8S_FEATURE_GATES environment variable on the webhook deployment to override version-based defaults (e.g. K8S_FEATURE_GATES=SidecarContainers=true,ImageVolume=true). This is an escape hatch for when version-based detection is insufficient; deployers are responsible for keeping it in sync with their cluster configuration.
+
+
+
+
1.7. Pod security context
+
+
The webhook sets a container-level security context on the sidecar (allowPrivilegeEscalation: false, capabilities: drop ALL, readOnlyRootFilesystem: true, seccompProfile: RuntimeDefault) but does not set a pod-level securityContext.
+
+
+
Pod-level security policies such as runAsNonRoot and seccompProfile: RuntimeDefault should be enforced using Kubernetes Pod Security Standards via the PodSecurity admission controller. Label the namespace with the appropriate enforcement level:
This is the recommended approach because it applies uniformly to all pods in the namespace and avoids ordering conflicts between multiple mutating admission webhooks.
+
+
+
+
+
+
2. Custom resource API compatibility
+
+
+
The admission webhook uses a single custom resource: KroxyliciousSidecarConfig, in the sidecar.kroxylicious.io API group at version v1alpha1.
+
+
+
+
+
+
+
+
+
The v1alpha1 API version does not provide backwards-compatibility guarantees. The schema, field names, semantics, and defaults may change between releases without a migration path. Do not depend on the stability of this API for production workloads.
+
+
+
+
+
+
+
+
+
+
3. Installing the Admission Webhook
+
+
+
This section provides instructions for installing the Kroxylicious Admission Webhook.
+
+
+
Install the admission webhook by applying the installation files included with the release artifacts.
+
+
+
3.1. Install prerequisites
+
+
To install the Kroxylicious Admission Webhook, you will need the following:
+
+
+
+
+
A Kubernetes 1.31 or later cluster. For development purposes, Minikube may be used.
+
+
+
The kubectl command-line tool installed and configured to connect to the running cluster.
+
+
+
An account with permission to create CustomResourceDefinition, MutatingWebhookConfiguration, and RBAC (ClusterRole) resources.
+
+
+
cert-manager installed in the cluster, for automated TLS certificate provisioning.
+
+
+
+
+
+
It is possible to install the webhook without cert-manager by provisioning TLS certificates manually.
+
+
+
+
+
+
+
+
+
For more information on the tools available for running Kubernetes, see Install Tools in the Kubernetes documentation.
+
+
+
+
3.2. Admission webhook release artifacts
+
+
+ To install the admission webhook using YAML manifest files, download the kroxylicious-admission-0.21.0.zip or kroxylicious-admission-0.21.0.tar.gz file from the GitHub release page, and extract the files as appropriate (for example using unzip or tar -xzf).
+
+
+
Each of these archives contains:
+
+
+
+
Installation files
+
+
In the install directory are the YAML manifests needed to install the admission webhook.
+
+
Examples
+
+
In the examples directory are example resources, including a cert-manager Certificate for automated TLS certificate provisioning.
+
+
+
+
+
+
3.3. Installing the admission webhook using installation files
+
+
This procedure shows how to install the Kroxylicious Admission Webhook in your Kubernetes cluster using cert-manager for TLS certificate provisioning.
+
+
+
Prerequisites
+
+
+
You need an account with permission to create CustomResourceDefinition, MutatingWebhookConfiguration, and RBAC (ClusterRole) resources.
+
+
+
You have downloaded the release artifacts and extracted the contents into the current directory.
A Kafka cluster is available and you know its bootstrap server address.
+
+
+
+
+
Procedure
+
+
+
Create a KroxyliciousSidecarConfig custom resource in the namespace where your application pods run. At a minimum, you must specify a virtual cluster with the target Kafka bootstrap address:
(Optional) Configure resource requests and limits for the sidecar proxy container. By default, the sidecar container is specified without resource constraints. To specify CPU and memory resources, add the resources field to your KroxyliciousSidecarConfig:
The resources field follows the standard Kubernetes ResourceRequirements format:
+
+
+
+
+
+
+
requests.memory and requests.cpu: The minimum resources guaranteed to the sidecar container. The scheduler uses these values when placing the pod.
+
+
+
limits.memory and limits.cpu: The maximum resources the sidecar container can consume. If the container exceeds the memory limit, it is terminated and restarted.
+
+
+
+
+
+
+
+
(Optional) Configure filters to transform traffic passing through the sidecar proxy. Add the filterDefinitions field to your KroxyliciousSidecarConfig to apply filters:
type: The FilterFactory plugin type (e.g., ProduceRequestTransformation).
+
+
+
config: Plugin-specific configuration. In this example, the UpperCasing transformation converts all produce request message values to uppercase.
+
+
+
+
+
+
+
+
(Optional) Mount secrets to provide credentials or sensitive configuration to filters. Secrets are mounted as read-only files in the sidecar container.
+
+
Create a Kubernetes Secret in the namespace containing the credentials:
Create or recreate application pods in the labelled namespace.
+
+
+
+
+
+
The webhook only processes pod CREATE requests. Existing pods are not modified. You must delete and recreate pods (or roll the deployment) for the sidecar to be injected.
+
+
+
+
+
+
+
Verify that the sidecar was injected by listing the containers in a pod:
+
+
+
kubectl get pod my-pod -n my-app-namespace -o jsonpath='{.spec.initContainers[*].name}'
+
+
+
+
The output should include kroxylicious-proxy alongside your application containers.
+
+
+
+
(Optional) To opt out individual pods from sidecar injection, set the following label on the pod:
(Optional) To find pods where sidecar injection was skipped, query the sidecar.kroxylicious.io/injection-skipped label. The webhook sets this label on any pod where injection was skipped for a reason other than opt-out.
+
+
To list all skipped pods in a namespace:
+
+
+
+
kubectl get pods -n my-app-namespace -l sidecar.kroxylicious.io/injection-skipped
+
+
+
+
To filter by a specific skip reason (e.g. no-config when no KroxyliciousSidecarConfig exists in the namespace):
+
+
+
+
kubectl get pods -n my-app-namespace -l sidecar.kroxylicious.io/injection-skipped=no-config
+
+
+
+
+
+
+
+
+
5. Trademark notice
+
+
+
+
+
Apache Kafka is a registered trademark of The Apache Software Foundation.
+
+
+
Kubernetes is a registered trademark of The Linux Foundation.
+
+
+
Prometheus is a registered trademark of The Linux Foundation.
+
+
+
Strimzi is a trademark of The Linux Foundation.
+
+
+
+ HashiCorp Vault is a registered trademark of HashiCorp, Inc.
+
+
+
AWS Key Management Service is a trademark of Amazon.com, Inc. or its affiliates.
+
+
+
Microsoft, Azure, and Microsoft Entra are trademarks of the Microsoft group of companies.
+
+
+
Fortanix and Data Security Manager are trademarks of Fortanix, Inc.
+
+
+
+
+
+
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/admission-webhook-guide/index.html b/documentation/0.21.0/html/admission-webhook-guide/index.html
new file mode 100644
index 00000000..d260f478
--- /dev/null
+++ b/documentation/0.21.0/html/admission-webhook-guide/index.html
@@ -0,0 +1,11 @@
+---
+layout: guide
+title: Kroxylicious Admission Webhook
+description: Using the Kroxylicious Admission Webhook to inject proxy sidecars into
+ application pods in a Kubernetes environment.
+tags:
+ - kubernetes
+rank: '021'
+version: 0.21.0
+permalink: /documentation/0.21.0/html/admission-webhook-guide/
+---
diff --git a/documentation/0.21.0/html/admission-webhook-guide/toc.html b/documentation/0.21.0/html/admission-webhook-guide/toc.html
new file mode 100644
index 00000000..3df81be7
--- /dev/null
+++ b/documentation/0.21.0/html/admission-webhook-guide/toc.html
@@ -0,0 +1,25 @@
+{% raw %}
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/authorization-guide/content.html b/documentation/0.21.0/html/authorization-guide/content.html
new file mode 100644
index 00000000..f5ffd868
--- /dev/null
+++ b/documentation/0.21.0/html/authorization-guide/content.html
@@ -0,0 +1,912 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers using the Kroxylicious Authorization filter to enforce authorization rules on client requests before they reach the Kafka brokers. Refer to other Kroxylicious guides for information about running the proxy or for advanced topics such as plugin development.
+
+
+
+
+
1. Authorization overview
+
+
+
The Authorization filter provides the ability for the proxy to enforce an authorization policy about Kafka resources. These authorization checks are performed in addition to any authorization checks made by the broker itself. For an action to be allowed, both the authorization filter and broker must decide that the action is allowed.
+
+
+
+ In general, the Authorization filter makes access decisions in the same manner as Kafka itself. A client cannot distinguish between authorization enforced on the proxy and authorization enforced on the Kafka cluster itself.
+
+
+
In order to use the Authorization filter, the proxy must be able to determine the authenticated subject. The authenticated subject is the verified identity of the client, derived from its successful authentication.
If your applications use SASL authentication, configure the SASL inspection filter to build the authenticated subject from the successful SASL exchange between the client and the broker.
+
+
+
+
+
+
+
2. Authorization Model
+
+
+
In Kafka, clients perform operations on resources.
+
+
+
The following tables list the resource types and the operations that apply to them.
+
+
+
2.1. Resource types and operations
+
+
2.1.1. Topic operations
+
+
This table lists the topic operations enforced by the authorization filter:
+
+
+
+
+
+
+
+
+
Operation
+
Typical use case
+
+
+
+
+
+
READ
+
+
+
Required for a consumer to fetch records.
+
+
+
+
+
WRITE
+
+
+
Required for a producer to send records.
+
+
+
+
+
CREATE
+
+
+
Required for an admin client to create, delete, or alter topics.
+
+
+
+
+
DELETE
+
+
+
+
+
ALTER
+
+
+
+
+
DESCRIBE
+
+
+
Required for an admin client to perform describe operations that refer to topic resources.
+
+
+
+
+
DESCRIBE_CONFIGS
+
+
+
Required for an admin client to perform describe config operations that refer to topic configuration.
+
+
+
+
+
ALTER_CONFIGS
+
+
+
Required for an admin client to perform alter config operations that relate to topic configuration.
+
+
+
+
+
+
+
2.1.2. Group operations
+
+
This table lists the operations enforced by the authorization filter for the group resource type:
+
+
+
+
+
+
+
+
+
Operations
+
Typical use case
+
+
+
+
+
+
READ
+
+
+
Required for a consumer to join a group.
+
+
+
+
+
DELETE
+
+
+
Required for an admin client to delete a group or offsets within a group.
+
+
+
+
+
DESCRIBE
+
+
+
Required for a consumer and admin client to find and describe a group and its offsets.
+
+
+
+
+
DESCRIBE_CONFIGS
+
+
+
Required for an admin client to perform describe operations that refer to group configuration.
+
+
+
+
+
ALTER_CONFIGS
+
+
+
Required for an admin client to perform operations that alter group configuration.
+
+
+
+
+
+
+
2.1.3. Transactional ID operations
+
+
This table lists the operations enforced by the authorization filter for the transactional ID resource type:
+
+
+
+
+
+
+
+
+
Operations
+
Typical use case
+
+
+
+
+
+
WRITE
+
+
+
Required for a producer or consumer to participate in a transaction.
+
+
+
+
+
DESCRIBE
+
+
+
Required for a producer, consumer, or admin client to find and describe transactions.
+
+
+
+
+
+
+
+
2.2. Implied operation permissions
+
+
In the authorization model, some operations imply permission to perform other operations. This table lists the higher-level operations and the implied lower-level operations they include.
+
+
+
+
+
+
+
+
+
+
Resource type
+
Operation
+
Implied operation
+
+
+
+
+
+
Topic
+
+
+
READ
+
+
+
DESCRIBE
+
+
+
+
+
Topic
+
+
+
WRITE
+
+
+
DESCRIBE
+
+
+
+
+
Topic
+
+
+
DELETE
+
+
+
DESCRIBE
+
+
+
+
+
Topic
+
+
+
ALTER
+
+
+
DESCRIBE
+
+
+
+
+
Topic
+
+
+
ALTER_CONFIGS
+
+
+
DESCRIBE_CONFIGS
+
+
+
+
+
Group
+
+
+
READ
+
+
+
DESCRIBE
+
+
+
+
+
Group
+
+
+
DELETE
+
+
+
DESCRIBE
+
+
+
+
+
Group
+
+
+
ALTER_CONFIGS
+
+
+
DESCRIBE_CONFIGS
+
+
+
+
+
TransactionalId
+
+
+
WRITE
+
+
+
DESCRIBE
+
+
+
+
+
+
+
+
+
3. Authorization rules
+
+
+
The authorization rules define which principals can perform specific operations on specific Kafka resources.
+
+
+
3.1. Outline of a rule file
+
+
The following example shows the overall outline of a rule file. The sections that follow give more details.
+
+
+
+
// Comment
+from io.kroxylicious.filter.authorization import TopicResource as Topic;
+
+deny User with name = "alice" to * Topic with name = "payments-received";
+allow User with name = "alice" to * Topic with name like "payments-*";
+
+otherwise deny;
+
+
+
+
+
3.2. Comments
+
+
Both line and block comments are supported. Line comments are preceded by //. Block comments are bracketed by /* … */ markers. Comments are ignored.
+
+
+
+
3.3. Imports
+
+
Resource types must be imported before use. This is achieved using a from / import statement.
+
+
+
+
from <package> import <element> [as <alias>][,... , <elementn> [as <aliasn>]];
+
+
+
+
where:
+
+
+
+
+
<package> is io.kroxylicious.filter.authorization
+
+
+
<element> is a ResourceType implementation name.
+
+
+
<alias> is an optional alias for the resource type.
+
+
+
+
+
The following resource types are available within the io.kroxylicious.filter.authorization package:
+
+
+
+
+
+
+
+
+
+
Resource type
+
Implementation name
+
Description
+
+
+
+
+
+
Topic
+
+
+
TopicResource
+
+
+
Represents Kafka topics.
+
+
+
+
+
Group
+
+
+
GroupResource
+
+
+
Represents Kafka consumer groups.
+
+
+
+
+
TransactionalId
+
+
+
TransactionalIdResource
+
+
+
Represents Kafka transactional IDs.
+
+
+
+
+
+
+ For example, TopicResource is the implementation that represents Kafka topics. To declare it, use an import statement like this:
+
+
+
+
from io.kroxylicious.filter.authorization import TopicResource;
+
+
+
+
To declare it with an alias, use an import statement like this:
+
+
+
+
from io.kroxylicious.filter.authorization import TopicResource as Topic;
+
+
+
+
+
3.4. Rules
+
+
The basic form of a rule is as follows:
+
+
+
+
<allow|deny> User with <user predicate> to <operation> <resource type> with <resource predicate>;
+
+
+
+
where:
+
+
+
+
+
<allow|deny> indicates whether to allow or deny the action.
+
+
+
<user predicate> matches the user principal performing the action.
+
+
+
<resource type> identifies the resource type being acted upon. This can either be the name of the resource type name or an alias for it.
+
+
+
<resource predicate> identifies the resource.
+
+
+
<operation> identifies the operation(s) to be performed on the resource.
+
+
+
+
+
Rules must be ordered so that any deny rules precede the allow rules.
+
+
+
When rules are evaluated, they are considered from top to bottom, with the first matching rule taking precedence.
+
+
+
+
3.5. Otherwise deny
+
+
Rules files must end with the statement otherwise deny. This stipulation means that all rules files have deny-by-default semantics.
+
+
+
+
...
+otherwise deny;
+
+
+
+
+
3.6. User predicates
+
+
The following User predicates are supported:
+
+
+
+
+
+
+
+
+
+
Predicate
+
Description
+
Example
+
+
+
+
+
+
=
+
+
+
Equals
+
+
+
name = "alice"
+
+
+
+
+
IN
+
+
+
Set inclusion
+
+
+
name in {"alice", "bob"}
+
+
+
+
+
LIKE
+
+
+
Prefix (note that the wildcard * is only permitted at the end of the prefix.)
+
+
+
name like "bob*"
+
+
+
+
+
+
+
3.7. Resource Predicates
+
+
The following resource predicates are supported:
+
+
+
+
+
+
+
+
+
+
Predicate
+
Description
+
Example
+
+
+
+
+
+
=
+
+
+
Equality
+
+
+
name = "mytopic"
+
+
+
+
+
IN
+
+
+
Set inclusion
+
+
+
name in {"topic1", "topic2"}
+
+
+
+
+
LIKE
+
+
+
Prefix (wildcard * is permitted only at the end)
+
+
+
name like "finance*"
+
+
+
+
+
MATCHING
+
+
+
Regular expression match
+
+
+
name matching "a+"
+
+
+
+
+
+
+
3.8. Operations
+
+
Operations in rules can be specified in the following ways:
+
+
+
+
+
As a single operation, for example READ
+
+
+
As a set of operations, for example {READ, WRITE}
+
+
+
As a wildcard that matches any operation, for example *
+
+
+
+
+
+
+
+
+
The Authorization filter does not support the keyword ALL.
+
+
+
+
+
+
+
+
+
4. Configuring the Authorization filter
+
+
+
This procedure describes how to set up the Authorization filter by configuring it in Kroxylicious.
+ In a Kubernetes deployment, use a ConfigMap resource. See Example ACL Rules.
+
+
+
+
+
+
+
+
4.1. Example proxy configuration file
+
+
If your instance of the Kroxylicious Proxy runs directly on an operating system, provide the Authorization filter configuration in the filterDefinitions list of your proxy configuration.
+
+
+
Here’s a complete example of a filterDefinitions entry configured for authorization:
authorizer is the name of the authorizer service implementation. Currently, this must be AclAuthorizerService.
+
+
+
aclFile is the reference file containing the ACL rules. You can use an interpolation reference to reference rules stored within a Kubernetes ConfigMap or Secret resource.
+
+
+
+
+
+
4.3. Example ACL Rules
+
+
If your instance of the Kroxylicious Proxy runs directly on an operating system, provide a file containing the rules and reference it from the proxy configuration.
+
+
+
Save the file with a name of your choice and reference it in the filter configuration. The following example shows an ACL rules file referenced as acl-rules.txt:
+
+
+
+
from io.kroxylicious.filter.authorization import TopicResource as Topic, GroupResource as Group, TransactionalIdResource as TransactionalId;
+deny User with name = "alice" to * Topic with name = "payments-received";
+allow User with name = "alice" to * Topic with name like "payments-*";
+allow User with name = "bob" to * Topic with name = "payments-received";
+allow User with name = "bob" to READ Group with name = "payment-consumer-group";
+allow User with name = "alice" to * TransactionalId with name like "alice-transaction-*";
+otherwise deny;
+
+
+
+
+
+ 4.4. Example ACL Rules on Kubernetes
+
+
If your instance of Kroxylicious runs on Kubernetes, you must use a ConfigMap resource to contain the ACL rules.
+
+
+
Here’s a complete example of a ConfigMap resource configured for authorization:
+
+
+
+ Example ConfigMap resource containing the ACL rules
+
+
+
apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: acl-rules
+data:
+ acl-rules.txt: |
+ from io.kroxylicious.filter.authorization import TopicResource as Topic, GroupResource as Group, TransactionalIdResource as TransactionalId;
+ deny User with name = "alice" to * Topic with name = "payments-received";
+ allow User with name = "alice" to * Topic with name like "payments-*";
+ allow User with name = "bob" to * Topic with name = "payments-received";
+ allow User with name = "bob" to READ Group with name = "payment-consumer-group";
+ allow User with name = "alice" to * TransactionalId with name like "alice-transaction-*";
+ otherwise deny;
+
+
+
+
+
+
+
5. Trademark notice
+
+
+
+
+
Apache Kafka is a registered trademark of The Apache Software Foundation.
+
+
+
Kubernetes is a registered trademark of The Linux Foundation.
+
+
+
Prometheus is a registered trademark of The Linux Foundation.
+
+
+
Strimzi is a trademark of The Linux Foundation.
+
+
+
+ HashiCorp Vault is a registered trademark of HashiCorp, Inc.
+
+
+
AWS Key Management Service is a trademark of Amazon.com, Inc. or its affiliates.
+
+
+
Microsoft, Azure, and Microsoft Entra are trademarks of the Microsoft group of companies.
+
+
+
Fortanix and Data Security Manager are trademarks of Fortanix, Inc.
+
+
+
+
+
+
+
6. Glossary
+
+
+
Glossary of terms used in the Authorization guide.
+
+
+
+
Subject
+
+
The identity of the client for the purposes of applying policies within the proxy. Whether this is the same as the broker’s notion of subject depends on how authentication is configured in the proxy.
+
+
Principal
+
+
A component of a subject.
+
+
Resource
+
+
An entity which an authorizer can control access to. Resources are identified by a type and a name. Examples include Kafka topics and consumer groups (where the group ID is treated as the resource name).
+
+
Operations
+
+
The things that can be done to resources of a particular type. For example, a Kafka topic resource has operations which include describe, read and write.
+
+
Action
+
+
A resource and an operation such as read the topic called invoices.
+
+
Authorizer
+
+
A component that makes an authorization decision, usually based on some kind of access policy.
+
+
Decision
+
+
The outcome of the authorization of a particular action. This is either allow or deny.
+
+
+
+
+
+
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/authorization-guide/index.html b/documentation/0.21.0/html/authorization-guide/index.html
new file mode 100644
index 00000000..45ba6c83
--- /dev/null
+++ b/documentation/0.21.0/html/authorization-guide/index.html
@@ -0,0 +1,12 @@
+---
+layout: guide
+title: Authorization Guide
+description: Using the Authorization filter to provide Kafka®-equivalent access controls
+ within the proxy.
+tags:
+ - security
+ - filter
+rank: '024'
+version: 0.21.0
+permalink: /documentation/0.21.0/html/authorization-guide/
+---
diff --git a/documentation/0.21.0/html/authorization-guide/toc.html b/documentation/0.21.0/html/authorization-guide/toc.html
new file mode 100644
index 00000000..af7d779e
--- /dev/null
+++ b/documentation/0.21.0/html/authorization-guide/toc.html
@@ -0,0 +1,32 @@
+{% raw %}
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/connection-expiration-guide/content.html b/documentation/0.21.0/html/connection-expiration-guide/content.html
new file mode 100644
index 00000000..030ac5b0
--- /dev/null
+++ b/documentation/0.21.0/html/connection-expiration-guide/content.html
@@ -0,0 +1,163 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers using the Kroxylicious Connection Expiration Filter. This filter closes client connections after a configurable expiration age, helping to rebalance connections across proxy instances in dynamic environments such as Kubernetes.
+
+
+
Refer to other Kroxylicious guides for information on running the proxy or for advanced topics such as plugin development.
+
+
+
This filter closes client connections after a configurable expiration age. This is useful in environments like Kubernetes where proxy instances scale up and down. Long-lived client connections can become stuck on old proxy instances, causing connection skew. By closing connections after a maximum age, clients will reconnect and be balanced across the available proxy instances.
+
+
+
When jitter is configured, the effective deadline is randomized within [maxAge - jitter, maxAge + jitter] per connection to avoid thundering herd reconnection storms.
+
+
+
+
+
1. Configuring the Connection Expiration filter
+
+
+
This procedure describes how to set up the Connection Expiration filter by configuring it in Kroxylicious.
If your instance of the Kroxylicious Proxy runs directly on an operating system, provide the filter configuration in the filterDefinitions list of your proxy configuration.
+
+
+
Here’s a complete example of a filterDefinitions entry configured for Connection Expiration:
maxAge (required) is the maximum age of a connection before it will be closed. Must be a positive duration (for example, "1h", "30m", "3600s").
+
+
+
jitter (optional) is the jitter to apply to the max age, randomizing the effective deadline within [maxAge - jitter, maxAge + jitter] per connection. Must be non-negative and not greater than maxAge. If omitted, no jitter is applied.
maxAge (required) is the maximum age of a connection before it will be closed. Must be a positive duration (for example, "1h", "30m", "3600s").
+
+
+
jitter (optional) is the jitter to apply to the max age, randomizing the effective deadline within [maxAge - jitter, maxAge + jitter] per connection. Must be non-negative and not greater than maxAge. If omitted, no jitter is applied.
+{% endraw %}
diff --git a/documentation/0.21.0/html/developer-guide/content.html b/documentation/0.21.0/html/developer-guide/content.html
new file mode 100644
index 00000000..85ac6a96
--- /dev/null
+++ b/documentation/0.21.0/html/developer-guide/content.html
@@ -0,0 +1,733 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers developing plugins for Kroxylicious using the Java programming language. Other guides should be consulted if you want to deploy, configure or secure a Kroxylicious Proxy.
+
+
+
+
+
1. Kroxylicious Proxy overview
+
+
+
Kroxylicious is an Apache Kafka protocol-aware ("Layer 7") proxy designed to enhance Kafka-based systems. Through its filter mechanism it allows additional behavior to be introduced into a Kafka-based system without requiring changes to either your applications or the Kafka cluster itself. Built-in filters are provided as part of the solution.
+
+
+
Functioning as an intermediary, the Kroxylicious mediates communication between a Kafka cluster and its clients. It takes on the responsibility of receiving, filtering, and forwarding messages.
+
+
+
A Java API provides a convenient means for implementing custom logic within the proxy.
There are effectively two APIs a filter developer needs to care about:
+
+
+
+
+
The Filter API against which the filter is written. This is a contract between the Filter developer and the Kroxylicious runtime. It includes Filter, FilterFactory, which the developer is responsible for implementing, and FilterContext and FilterFactoryContext, which are provided by the Kroxylicious runtime for the developer to use.
+
+
+
The "configuration API" that your filter exposes. This is a contract between the Filter developer and Kroxylicious users.
+
+
+
+
+
1.1.1. Compatibility of the Filter API
+
+
The Kroxylicious project uses semantic versioning. For the filter API this means that you can compile your filter against the Kroxylicious API at version x.yc.zc and users will be able to use it with Kroxylicious runtimes at version x.yr.zr if the runtime version is not older than the compile time version (that is if yr ≥ yc and zr ≥ zc).
+
+
+
+
1.1.2. Compatibility of your Filter configuration
+
+
The Kroxylicious Proxy isn’t able to provide or enforce any compatibility guarantees about the configuration API that your plugin offers to users. In other words you are free to release your plugin at version a.b.c and later release a version a.d.e which doesn’t accept the same configuration syntax (JSON or YAML) that the original version did.
+
+
+
Doing this makes it more difficult for users to upgrade from older versions of your plugin, because they will have to rewrite and revalidate the configuration which worked with the old version.
+
+
+
For this reason filter developers are strongly encouraged to adopt Semantic versioning as the way to communicate compatibility of the configuration API they offer to users.
+
+
+
+
+
+
+
2. Custom filters
+
+
+
Custom filters can be written in the Java programming language. Kroxylicious supports Java 21. Knowledge of the Kafka protocol is generally required to write a protocol filter.
+
+
+
There is currently one class of Custom Filters users can implement:
Allow customisation of how protocol messages are handled on their way to, or from, the Cluster.
+
+
+
+
+
The following sections explain in more detail how to write your own filters.
+
+
+
2.1. Custom Filter Project Generation
+
+
You can generate a standalone filter project. This bootstraps a project to allow you to build your own Custom Filters. You can follow the Kroxylicious Developer Quickstart for a hands-on introduction to creating your own custom filters.
How filter classes are loaded is not currently defined by the filter contract. In other words, filters might be loaded using a classloader-per-filter model, or using a single class loader. This doesn’t really make a difference to filter authors except where they want to make use of libraries as dependencies. Because those dependencies might be loaded by the same classloader as the dependencies of other filters there is the possibility of collision. Filter A and Filter B might both want to use Library C, and they might want to use different versions of Library C.
+
+
+
For common things like logging and metric facade APIs it is recommended to use the facade APIs which are also used by the proxy core.
+
+
+
+
2.4. Protocol filters
+
+
A protocol filter is a public top-level, concrete class with a particular public constructor and which implements one or more protocol filter interfaces. You can implement two distinct types of Custom Protocol Filter:
Note that these types are mutually exclusive, for example a Filter is not allowed to implement both RequestFilter and MetadataRequestFilter. This is to prevent ambiguity. If we received a MetadataRequest, would it be dispatched to the onMetadataRequest(..) method of MetadataRequestFilter or the onRequest method of RequestFilter, or both? Instead, we disallow these combinations, throwing an exception at runtime if your Filter implements incompatible interfaces.
+
+
+
2.4.1. Specific Message Protocol Filters
+
+
A filter may wish to intercept specific types of Kafka messages. For example, intercept all Produce Requests, or intercept all Fetch Responses. To support this case Kroxylicious provides interfaces for all request types and response types supported by Kafka (at the version of Kafka Kroxylicious depends on). A filter implementation can implement any combination of these interfaces.
+
+
+
There is no requirement that a Filter handles both the request and response halves of an RPC. A Filter can choose to intercept only the request, or only the response, or both the request and response.
+
+
+
Examples
+
+
To intercept all Fetch Requests your class would implement FetchRequestFilter:
Specific Message Filter interfaces are mutually exclusive with Request/Response. Kroxylicious will reject invalid combinations of interfaces.
+
+
+
+
+
2.4.2. Request/Response Protocol Filters
+
+
A filter may wish to intercept every message being sent from the Client to the Cluster or from the Cluster to the Client. To do this your custom filter will implement:
Request/Response Filter interfaces are mutually exclusive with Specific Message interfaces. Kroxylicious will reject invalid combinations of interfaces.
+
+
+
+
2.4.3. The Filter Result
+
+
As seen above, filter methods (onXyz[Request|Response]) must return a CompletionStage<FilterResult> object. It is the job of FilterResult to convey what message is to be forwarded to the next filter in the chain (or broker/client if at the chain’s beginning or end). It is also used to carry instructions such as indicating that the connection must be closed, or a message dropped.
+
+
+
If the filter returns a CompletionStage that is already completed normally, Kroxylicious will immediately perform the action described by the FilterResult.
+
+
+
The filter may return a CompletionStage that is not yet completed. When this happens, Kroxylicious will pause reading from the downstream (the Client writes will eventually block), and it begins to queue up in-flight requests/responses arriving at the filter. This is done so that message order is maintained. Once the CompletionStage completes, the action described by the FilterResult is performed, reading from the downstream resumes and any queued up requests/responses are processed.
+
+
+
+
+
+
+
The pausing of reads from the downstream is a relatively costly operation. To maintain optimal performance filter implementations should minimise the occasions on which an incomplete CompletionStage is returned.
+
+
+
+
+
+
If the CompletionStage completes exceptionally, the connection is closed. This also applies if the CompletionStage does not complete within a timeout (20000 milliseconds).
+
+
+
Creating a Filter Result
+
+
The FilterContext is the factory for the FilterResult objects.
+
+
+
There are two convenience methods[1] that simply allow a filter to forward a result to the next filter. We’ve already seen these in action above.
+
+
+
+
+
context.forwardRequest(header, request) used by a request filter to forward a request.
+
+
+
context.forwardResponse(header, response) used by a response filter to forward a response.
+
+
+
+
+
To access richer features, use the filter result builders context.requestFilterResultBuilder() and responseFilterResultBuilder().
+
+
+
Filter result builders allow you to:
+
+
+
+
+
forward a request/response: .forward(header, request).
+
+
+
signal that a connection is to be closed: .withCloseConnection().
+
+
+
signal that a message is to be dropped (i.e. not forwarded): .drop().
+
+
+
for requests only, send a short-circuit response: .shortCircuitResponse(header, response)
+
+
+
+
+
The builder lets you combine legal behaviours together. For instance, to close the connection after forwarding a response to a client, a response filter could use:
The builders yield either a completed CompletionStage<FilterResult> which can be returned directly from the filter method, or a bare FilterResult. The latter exists to support asynchronous programming styles allowing you to use your own Futures.
+
+
+
+
+
+
+
The drop behaviour can be legally used in very specific circumstances. The Kafka Protocol is, for the most part, strictly request/response with responses expected in the order the requests were sent. The client will fail if the contract isn’t upheld. The exception is Produce where acks=0. Filters may drop these requests without introducing a protocol error.
+
+
+
+
+
+
+
+
2.4.4. The protocol filter lifecycle
+
+
Instances of the filter class are created on demand when a protocol message is first sent by a client. Instances are specific to the channel between a single client and a single broker.
+
+
+
It exists while the client remains connected.
+
+
+
+
2.4.5. Handling state
+
+
The simplest way of managing per-client state is to use member fields. The proxy guarantees that all methods of a given filter instance will always be invoked on the same thread (also true of the CompletionStage completion in the case of Sending asynchronous requests to the Cluster). Therefore, there is no need to use synchronization when accessing such fields.
This is a common pattern, we want to inspect or modify a message. For example:
+
+
+
+
public class SampleFetchResponseFilter implements FetchResponseFilter {
+ @Override
+ public CompletionStage<ResponseFilterResult> onFetchResponse(short apiVersion,
+ ResponseHeaderData header,
+ FetchResponseData response,
+ FilterContext context) {
+ // We mutate the response object. For example, you could alter the records that have been fetched.
+ mutateResponse(response, context);
+ // We forward the response, sending it towards the client, invoking Filters downstream of this one.
+ return context.forwardResponse(header, response);
+ }
+}
+
+
+
+
+
+
+
+
We can only forward the response and header objects passed into the onFetchResponse. New instances are not supported.
+
+
+
+
+
+
+
Sending Response messages from a Request Filter towards the Client (Short-circuit responses)
+
+
In some cases we may wish to not forward a request from the client to the Cluster. Instead, we want to intercept that request and generate a response message in a Kroxylicious Protocol Filter and send it towards the client. This is called a short-circuit response.
+
+
+
+
+
+
Figure 1. Illustration of responding without proxying
+
+
+
For example:
+
+
+
+
public class CreateTopicRejectFilter implements CreateTopicsRequestFilter {
+
+ public CompletionStage<RequestFilterResult> onCreateTopicsRequest(short apiVersion, RequestHeaderData header, CreateTopicsRequestData request,
+ FilterContext context) {
+
+ // Create a new instance of the corresponding response data and populate it. Note you may need to use the `apiVersion` to check which fields can be set at this request's API version.
+ CreateTopicsResponseData response = new CreateTopicsResponseData();
+ CreateTopicsResponseData.CreatableTopicResultCollection topics = new CreateTopicsResponseData.CreatableTopicResultCollection();
+ request.topics().forEach(creatableTopic -> {
+ CreateTopicsResponseData.CreatableTopicResult result = new CreateTopicsResponseData.CreatableTopicResult();
+ result.setErrorCode(Errors.INVALID_TOPIC_EXCEPTION.code()).setErrorMessage(ERROR_MESSAGE);
+ result.setName(creatableTopic.name());
+ topics.add(result);
+ });
+ response.setTopics(topics);
+ // We generate a short-circuit response that will send it towards the client, invoking Filters downstream of this one.
+ return context.requestFilterResultBuilder().shortCircuitResponse(response).completed();
+ }
+}
+
+
+
+
This will respond to all Create Topic requests with an error response without forwarding any of those requests to the Cluster.
+
+
+
Closing the connections
+
+
There is a useful variation on the pattern above, where the filter needs, in addition to sending an error response, also to cause the connection to close. This is useful in use-cases where the filter wishes to disallow certain client behaviours.
+
+
+
+
public class DisallowAlterConfigs implements AlterConfigsRequestFilter {
+
+ @Override
+ public CompletionStage<RequestFilterResult> onAlterConfigsRequest(short apiVersion, RequestHeaderData header, AlterConfigsRequestData request,
+ FilterContext context) {
+ var response = new AlterConfigsResponseData();
+ response.setResponses(request.resources().stream()
+ .map(a -> new AlterConfigsResourceResponse()
+ .setErrorCode(Errors.INVALID_CONFIG.code())
+ .setErrorMessage("This service does not allow this operation - closing connection"))
+ .toList());
+ // We enable the close connection option on the builder. This will cause Kroxylicious to close the connection after the response is sent to the client.
+ return context.requestFilterResultBuilder()
+ .shortCircuitResponse(response)
+ .withCloseConnection()
+ .completed();
+ }
+}
+
+
+
+
+
+
Sending asynchronous requests to the Cluster
+
+
Filters can make additional asynchronous requests to the Cluster. This is useful if the Filter needs additional information from the Cluster in order to know how to mutate the filtered request/response.
+
+
+
The Filter can make use of CompletionStage chaining features (thenApply() etc.) to organise for actions to be done once the asynchronous request completes. For example, it could chain an action that mutates the filtered request/response using the asynchronous response, and finally, chain an action to forward the request/response to the next filter.
+
+
+
The asynchronous request/response will be intercepted by Filters upstream of this Filter. Filters downstream of this Filter (and the Client) do not see the asynchronous response.
+
+
+
Let’s take a look at an example. We’ll send an asynchronous request towards the Cluster for topic metadata while handling a FetchRequest and use the response to mutate the FetchRequest before passing it to the next filter in the chain.
+
+
+
+
public class FetchFilter implements FetchRequestFilter {
    public static final short METADATA_VERSION_SUPPORTING_TOPIC_IDS = (short) 12;
+
+ @Override
+ public CompletionStage<RequestFilterResult> onFetchRequest(ApiKeys apiKey,
+ RequestHeaderData header,
+ FetchRequestData request,
+ FilterContext context) {
+ // Create a header with a compatible API version.
+ // The version must be supported by both the Kafka client used by Kroxylicious and the target cluster.
+ var metadataRequestHeader = new RequestHeaderData().setRequestApiVersion(METADATA_VERSION_SUPPORTING_TOPIC_IDS);
+ // Build a metadata request matching the chosen API version.
+ // Refer to the {kafka-protocol}[Kafka Protocol Guide] for more details.
+ var metadataRequest = new MetadataRequestData();
+ var topic = new MetadataRequestData.MetadataRequestTopic();
+ topic.setTopicId(Uuid.randomUuid());
+ metadataRequest.topics().add(topic);
+ // Send request asynchronously to the cluster and obtain a CompletionStage containing the response.
+ var stage = context.sendRequest(metadataRequestHeader, metadataRequest);
+ // Use response to mutate the fetch request.
+ return stage.thenApply(metadataResponse -> mutateFetchRequest(metadataResponse, request))
+ // Forward the mutated request.
+ .thenCompose(mutatedFetchRequest -> context.forwardRequest(header, mutatedFetchRequest));
+ }
+}
+
+
+
+
As you have read above, we need to know the API version we want our request to be encoded at. Your filter can discover what versions of an API the Kafka Cluster supports. To do this use the ApiVersionsService available from the FilterContext to determine programmatically what versions of an API are supported and then write code to make a suitable request object.
+
+
+
+
+
+
+
Kroxylicious provides the guarantee that computation stages chained using the default execution methods are executed on the same thread as the rest of the Filter work, so we can safely mutate Filter members without synchronising. See the io.kroxylicious.proxy.filter package javadoc for more information on thread-safety.
+
+
+
+
+
+
+
Filtering specific API Versions
+
+
+
+
Kafka has a "bidirectional" client compatibility policy. In other words, new clients can talk to old servers, and old clients can talk to new servers. This allows users to upgrade either clients or servers without experiencing any downtime.
+
+
+
Since the Kafka protocol has changed over time, clients and servers need to agree on the schema of the message that they are sending over the wire. This is done through API versioning.
+
+
+
Before each request is sent, the client sends the API key and the API version. These two 16-bit numbers, when taken together, uniquely identify the schema of the message to follow.
You may wish to restrict your Filter to only apply to specific versions of an API. For example, "intercept all FetchRequest messages greater than api version 7". To do this you can override a method named shouldHandleXyz[Request|Response] on your filter like:
For Kroxylicious to instantiate and configure your custom filter we use Java’s ServiceLoader API. Each Custom Filter should provide a corresponding FilterFactory implementation that can create an instance of your custom Filter. The factory can optionally declare a configuration class that Kroxylicious will populate (using Jackson) when loading your custom Filter. The module must package a META-INF/services/io.kroxylicious.proxy.filter.FilterFactory file containing the classnames of each filter factory implementation into the JAR file.
Then, when we configure a filter in Kroxylicious configuration like:
+
+
+
+
filterDefinitions:
+- name: my-replacer
+ type: SampleFetchResponse
+ config:
+ findValue: a
+ replacementValue: b
+
+
+
+
Kroxylicious will deserialize the config object into a SampleFilterConfig and use it to construct a SampleFetchResponseFilter passing the SampleFilterConfig instance as a constructor argument.
+
+
+
+
+
2.5. Packaging filters
+
+
Filters are packaged as standard .jar files. A typical Custom Filter jar contains:
Third-party filter JARs can be deployed using the classpath-plugins/ directory under the Kroxylicious installation directory.
+
+
+
+
+
+
+
Loading Kroxylicious plugins from the classpath-plugins/ directory is an Alpha feature: This feature may change, or be removed entirely, without warning in any future Kroxylicious release.
+
+
+
+
+
+
Each plugin should be placed in its own subdirectory:
Third-party plugin JARs, organised by plugin name.
+
+
+
+
+
+
The proxy startup script automatically scans classpath-plugins/ subdirectories and adds their contents to the Java classpath. Plugins are then discovered via ServiceLoader alongside the built-in filters.
+
+
+
If the classpath-plugins/ directory does not exist, no plugin scanning is performed and the proxy starts normally.
+
+
+
2.6.1. Dependency shading
+
+
+
+
+
+
There is currently no classloader isolation between plugins.
+
+
+
+
+
+
All plugins share a single classpath with the proxy itself. If a plugin bundles a library that is also used by the proxy or by another plugin (for example Jackson or Guava), the classpath ordering determines which version is used at runtime. This can cause subtle failures.
+
+
+
To avoid dependency conflicts, plugin authors should shade (relocate) their transitive dependencies using the Maven Shade Plugin or an equivalent tool. Shading rewrites the package names of bundled libraries so they cannot collide with other versions on the classpath.
+
+
+
+
+
+
+
3. Trademark notice
+
+
+
+
+
Apache Kafka is a registered trademark of The Apache Software Foundation.
+
+
+
Kubernetes is a registered trademark of The Linux Foundation.
+
+
+
Prometheus is a registered trademark of The Linux Foundation.
+
+
+
Strimzi is a trademark of The Linux Foundation.
+
+
+
Hashicorp Vault is a registered trademark of HashiCorp, Inc.
+
+
+
AWS Key Management Service is a trademark of Amazon.com, Inc. or its affiliates.
+
+
+
Microsoft, Azure, and Microsoft Entra are trademarks of the Microsoft group of companies.
+
+
+
Fortanix and Data Security Manager are trademarks of Fortanix, Inc.
+
+
+
+
+
+
+
+
+ 1. The context.forward*() methods behave exactly as the builder form .forward(header, message).complete()
+
+
+
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/developer-guide/index.html b/documentation/0.21.0/html/developer-guide/index.html
new file mode 100644
index 00000000..93690b89
--- /dev/null
+++ b/documentation/0.21.0/html/developer-guide/index.html
@@ -0,0 +1,10 @@
+---
+layout: guide
+title: Kroxylicious Developer Guide
+description: Writing plugins for the proxy in the Java programming language.
+tags:
+ - developer
+rank: '032'
+version: 0.21.0
+permalink: /documentation/0.21.0/html/developer-guide/
+---
diff --git a/documentation/0.21.0/html/developer-guide/toc.html b/documentation/0.21.0/html/developer-guide/toc.html
new file mode 100644
index 00000000..c4ef7514
--- /dev/null
+++ b/documentation/0.21.0/html/developer-guide/toc.html
@@ -0,0 +1,20 @@
+{% raw %}
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/developer-quick-start/content.html b/documentation/0.21.0/html/developer-quick-start/content.html
new file mode 100644
index 00000000..94ac7c19
--- /dev/null
+++ b/documentation/0.21.0/html/developer-quick-start/content.html
@@ -0,0 +1,172 @@
+{% raw %}
+
+
+
+
+
+
+
Kroxylicious' composable filter chains and pluggable API mean that you can write your own filters to apply your own rules to the Kafka protocol, using the Java programming language.
+
+
+
In this quick start guide you will build a custom filter and use it to modify messages being sent to/consumed from Kafka, learn about filter configuration and running custom filters, and find a starting point for developing your own custom filters with your own rules and logic.
+
+
+
+
+
1. Getting started
+
+
+
1.1. Prerequisites
+
+
To start developing your own custom filters for Kroxylicious, you will need to install JDK 21.
If you are using Podman, you may encounter issues with the integration tests. There are instructions here to resolve this.
+
+
+
+
+
+
+
1.2. Generate a Sample Filter Project
+
+
The easiest way to learn how to build custom filters is with our kroxylicious-filter-archetype maven archetype, which will generate some basic find-and-replace filters for you to experiment with. Begin by generating a sample project:
Note that the sample project includes automated unit and integration tests. The project’s powerful integration tests run against an in-VM Kafka cluster, enabling you to rapidly iterate on your filter’s business logic.
Send a Kafka message containing foo through the proxy. You should see the content transformed, with the word foo replaced by baz in the message received by the consumer.
Filters can be added and removed by altering the filterDefinitions list in the sample-proxy-config.yaml file. You can also reconfigure the sample filters by changing the configuration values in this file. Note that the proxy must be restarted when you modify the configuration.
+
+
+
The SampleFetchResponseFilter and SampleProduceRequestFilter each have two configuration values that must be specified for them to work:
+
+
+
+
+
findValue - the string the filter will search for in the produce/fetch data
+
+
+
replacementValue - the string the filter will replace the value above with
+
+
+
+
+
4.1. Default Configuration
+
+
The default configuration for SampleProduceRequestFilter is:
This means that it will search for the string foo in the produce data and replace all occurrences with the string bar. For example, if a Kafka Producer sent a produce request with data {"myValue":"foo"}, the filter would transform this into {"myValue":"bar"} and Kroxylicious would send that to the Kafka Broker instead.
+
+
+
The default configuration for SampleFetchResponseFilter is:
This means that it will search for the string bar in the fetch data and replace all occurrences with the string baz. For example, if a Kafka Broker sent a fetch response with data {"myValue":"bar"}, the filter would transform this into {"myValue":"baz"} and Kroxylicious would send that to the Kafka Consumer instead.
+
+
+
+
4.2. Modify
+
+
Now that you know how the sample filters work, you can start modifying them! Replace the SampleFilterTransformer logic with your own code, change which messages they apply to, or whatever else you like!
+
+
+
+
+
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/developer-quick-start/index.html b/documentation/0.21.0/html/developer-quick-start/index.html
new file mode 100644
index 00000000..7a3883d9
--- /dev/null
+++ b/documentation/0.21.0/html/developer-quick-start/index.html
@@ -0,0 +1,10 @@
+---
+layout: guide
+title: Developer Quick Start
+description: Start here if you're developing a filter for the first time.
+tags:
+ - developer
+rank: '031'
+version: 0.21.0
+permalink: /documentation/0.21.0/html/developer-quick-start/
+---
diff --git a/documentation/0.21.0/html/developer-quick-start/toc.html b/documentation/0.21.0/html/developer-quick-start/toc.html
new file mode 100644
index 00000000..241a29c9
--- /dev/null
+++ b/documentation/0.21.0/html/developer-quick-start/toc.html
@@ -0,0 +1,18 @@
+{% raw %}
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/entity-isolation-guide/content.html b/documentation/0.21.0/html/entity-isolation-guide/content.html
new file mode 100644
index 00000000..9777e17a
--- /dev/null
+++ b/documentation/0.21.0/html/entity-isolation-guide/content.html
@@ -0,0 +1,307 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers using the Kroxylicious Entity Isolation Filter. This filter gives each authenticated user a private space within a Kafka cluster, isolated from other users sharing the same cluster. Isolation can be applied selectively to some entity types and not others. For example, it’s possible to have isolation for consumer groups, but not for topics.
+
+
+
+
+
1. Entity isolation overview
+
+
+
The Kroxylicious Entity Isolation filter gives each authenticated client a private, isolated view of certain Kafka entity types within a shared cluster. When entity isolation is enabled for a resource type, each client connecting through the proxy operates as if it owns that namespace entirely — it cannot see, interfere with, or accidentally collide with the same-named resources belonging to other clients.
+
+
+
The filter achieves this by intercepting Kafka RPCs on the request and response paths:
+
+
+
+
+
On the request path, resource names are transparently mapped from the client’s isolated view to a cluster-wide name that includes a client-specific prefix or transformation.
+
+
+
On the response path, the cluster-wide name is mapped back to the client’s view. Resources that do not belong to the current client’s namespace are filtered out of list and describe responses.
+
+
+
+
+
+
+
+
Figure 1. Entity Isolation filter — request and response path
+
+
+
To use the Entity Isolation filter, the proxy must determine the authenticated subject. The authenticated subject is the verified identity of the client, derived from successful authentication.
If your applications use SASL authentication, configure the SASL inspection filter to build the authenticated subject from the successful SASL exchange between the client and the broker.
+
+
+
+
+
1.1. Supported entity types
+
+
The current implementation supports isolation for the following Kafka entity types:
+
+
+
+
+ GROUP_ID
+
+
+
Consumer group identifiers used in all consumer group RPCs (e.g. JoinGroup, OffsetFetch, DeleteGroups).
+
+
+ TRANSACTIONAL_ID
+
+
+
Transactional producer identifiers used in transactional RPCs (e.g. InitProducerId, AddPartitionsToTxn, EndTxn).
+
+
+
+
+
+
+
+
+
Topic name isolation is not yet supported. For more information, see the related issue.
+
+
+
+
+
+
+
+
+
2. How entity isolation works
+
+
+
2.1. Name mapping
+
+
The filter delegates all resource name transformations to a pluggable EntityNameMapper. The mapper is given the authenticated subject of the current connection and the entity type being mapped, and is responsible for:
+
+
+
+
+
Translating a client-visible name to a cluster-visible name (mapping).
+
+
+
Translating a cluster-visible name back to a client-visible name (unmapping).
+
+
+
Deciding whether a cluster-visible name falls within the current client’s namespace.
+
+
+
+
+
The built-in mapper, PrincipalEntityNameMapper, prefixes resource names with the name of a principal from the authenticated subject. By default, the mapper extracts the io.kroxylicious.proxy.authentication.User principal type from the subject. This can be overridden to any other class that implements the io.kroxylicious.proxy.authentication.Principal interface that has the io.kroxylicious.proxy.authentication.Unique class-level annotation. The mapper requires that the authenticated subject provides exactly one principal of the expected type, otherwise the connection will be closed with an error.
+
+
+
When forming the cluster-visible name, the mapper separates the principal name from the resource name using a separator. The separator defaults to a hyphen (-), but this can also be overridden in configuration. If a client authenticates with a principal that contains the separator, the filter will disconnect the client with an error.
+
+
+
Both the principal name and the separator must contain only ASCII alphanumerics, '.', '_' and '-'. This restriction comes from Kafka itself.
+
+
+
To illustrate the default behavior of the mapper, given a principal name alice and a consumer group called my-consumer-group, the group is stored on the broker as alice-my-consumer-group.
+
+
+
+
+
+
+
+
+
The filter can only guarantee non-collision for clients that connect through the proxy. If clients connect straight to the broker, there is no mechanism to prevent them from creating resources that collide with the cluster-visible names chosen by this filter.
+
+
+
+
+
+
+
+
+
2.2. Request path
+
+
When a Kafka request arrives at the proxy:
+
+
+
+
+
The filter identifies fields in the request that reference entity types for which isolation is enabled.
+
+
+
Each matching field value is passed to the EntityNameMapper which returns the cluster-visible name.
+
+
+
The mutated request is forwarded to the broker.
+
+
+
+
+
+
2.3. Response path
+
+
When the broker’s response arrives at the proxy:
+
+
+
+
+
The filter identifies fields in the response that reference entity types for which isolation is enabled.
+
+
+
For each such field, the EntityNameMapper checks whether the value belongs to the current client’s namespace.
+
+
+
+
If it does, the value is unmapped to the client-visible name and passed through.
+
+
+
If it does not, the filter removes the containing record from the response.
+
+
+
+
+
+
The mutated response is returned to the client.
+
+
+
+
+
+
+
+
3. Configuring the Entity Isolation filter
+
+
+
This procedure describes how to set up the Entity Isolation filter by configuring it in Kroxylicious.
If your instance of the Kroxylicious Proxy runs directly on an operating system, provide the Entity Isolation filter configuration in the filterDefinitions list of your proxy configuration.
+
+
+
Here’s a complete example of a filterDefinitions entry configured for entity isolation:
entityTypes is an array of entity types to isolate. Allowed values are GROUP_ID and TRANSACTIONAL_ID.
+
+
+
mapper is the name of the naming mapping service implementation. Currently, this must be PrincipalEntityNameMapperService.
+
+
+
separator is the string that the mapper uses to separate the principal from the resource name.
+
+
+
principalType identifies the principal type within the subject that will be used to form the cluster-visible entity names. Use the fully-qualified class name of the principal. Defaults to io.kroxylicious.proxy.authentication.User if not specified.
entityTypes is an array of entity types to isolate. Allowed values are GROUP_ID and TRANSACTIONAL_ID.
+
+
+
mapper is the name of the naming mapping service implementation. Currently, this must be PrincipalEntityNameMapperService.
+
+
+
separator is the string that the mapper uses to separate the principal from the resource name.
+
+
+
principalType identifies the principal type within the subject that will be used to form the cluster-visible entity names. Use the fully-qualified class name of the principal. Defaults to io.kroxylicious.proxy.authentication.User if not specified.
+
+
+
+
+
+
+
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/entity-isolation-guide/index.html b/documentation/0.21.0/html/entity-isolation-guide/index.html
new file mode 100644
index 00000000..c887f51c
--- /dev/null
+++ b/documentation/0.21.0/html/entity-isolation-guide/index.html
@@ -0,0 +1,11 @@
+---
+layout: guide
+title: Entity Isolation Guide
+description: Using the entity isolation filter to give authenticated Kafka® clients
+ a private namespace within a Kafka cluster.
+tags:
+ - filter
+rank: '025'
+version: 0.21.0
+permalink: /documentation/0.21.0/html/entity-isolation-guide/
+---
diff --git a/documentation/0.21.0/html/entity-isolation-guide/toc.html b/documentation/0.21.0/html/entity-isolation-guide/toc.html
new file mode 100644
index 00000000..9e65da9b
--- /dev/null
+++ b/documentation/0.21.0/html/entity-isolation-guide/toc.html
@@ -0,0 +1,21 @@
+{% raw %}
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/images/cluster-filter-chain.svg b/documentation/0.21.0/html/images/cluster-filter-chain.svg
new file mode 100644
index 00000000..1d01f81f
--- /dev/null
+++ b/documentation/0.21.0/html/images/cluster-filter-chain.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/cluster_topology_many_to_one.svg b/documentation/0.21.0/html/images/cluster_topology_many_to_one.svg
new file mode 100644
index 00000000..1861859a
--- /dev/null
+++ b/documentation/0.21.0/html/images/cluster_topology_many_to_one.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/cluster_topology_one_to_one.svg b/documentation/0.21.0/html/images/cluster_topology_one_to_one.svg
new file mode 100644
index 00000000..a7730d81
--- /dev/null
+++ b/documentation/0.21.0/html/images/cluster_topology_one_to_one.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/entity-isolation-seq.svg b/documentation/0.21.0/html/images/entity-isolation-seq.svg
new file mode 100644
index 00000000..0cfb39ba
--- /dev/null
+++ b/documentation/0.21.0/html/images/entity-isolation-seq.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/filter-short-circuiting.svg b/documentation/0.21.0/html/images/filter-short-circuiting.svg
new file mode 100644
index 00000000..55d0fcb8
--- /dev/null
+++ b/documentation/0.21.0/html/images/filter-short-circuiting.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/landscape.svg b/documentation/0.21.0/html/images/landscape.svg
new file mode 100644
index 00000000..0370d98f
--- /dev/null
+++ b/documentation/0.21.0/html/images/landscape.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/monitoring-message-counters.svg b/documentation/0.21.0/html/images/monitoring-message-counters.svg
new file mode 100644
index 00000000..e84f4451
--- /dev/null
+++ b/documentation/0.21.0/html/images/monitoring-message-counters.svg
@@ -0,0 +1,5 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/oauth-bearer-validation-seq.svg b/documentation/0.21.0/html/images/oauth-bearer-validation-seq.svg
new file mode 100644
index 00000000..96b5e8f6
--- /dev/null
+++ b/documentation/0.21.0/html/images/oauth-bearer-validation-seq.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/operator-input-resource-topology.svg b/documentation/0.21.0/html/images/operator-input-resource-topology.svg
new file mode 100644
index 00000000..6865d8d4
--- /dev/null
+++ b/documentation/0.21.0/html/images/operator-input-resource-topology.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/operator-output-resource-topology.svg b/documentation/0.21.0/html/images/operator-output-resource-topology.svg
new file mode 100644
index 00000000..9e1d2b10
--- /dev/null
+++ b/documentation/0.21.0/html/images/operator-output-resource-topology.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/quickstart-record-encryption.svg b/documentation/0.21.0/html/images/quickstart-record-encryption.svg
new file mode 100644
index 00000000..95411877
--- /dev/null
+++ b/documentation/0.21.0/html/images/quickstart-record-encryption.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/images/sasl-inspection-seq.svg b/documentation/0.21.0/html/images/sasl-inspection-seq.svg
new file mode 100644
index 00000000..4e38dcef
--- /dev/null
+++ b/documentation/0.21.0/html/images/sasl-inspection-seq.svg
@@ -0,0 +1,4 @@
+
+
+
\ No newline at end of file
diff --git a/documentation/0.21.0/html/kroxylicious-operator/content.html b/documentation/0.21.0/html/kroxylicious-operator/content.html
new file mode 100644
index 00000000..4359b468
--- /dev/null
+++ b/documentation/0.21.0/html/kroxylicious-operator/content.html
@@ -0,0 +1,3149 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers using the Kroxylicious Operator to configure, deploy, secure, and operate the Kroxylicious Proxy on Kubernetes. Refer to other Kroxylicious guides for information on running the proxy outside Kubernetes or for advanced topics such as plugin development.
+
+
+
+
+
1. Kroxylicious Operator overview
+
+
+
Kroxylicious Proxy is an Apache Kafka protocol-aware ("Layer 7") proxy designed to enhance Kafka-based systems.
+
+
+
The Kroxylicious Operator is an operator for Kubernetes which simplifies deploying and operating the Kroxylicious Proxy.
The Kroxylicious Operator uses a declarative API based on Kubernetes custom resources to manage proxy deployments.
+
+
+
2.1. API resources used by the Kroxylicious Proxy
+
+
The operator takes custom resources and core Kubernetes resources as inputs:
+
+
+
+
+ KafkaProxy
+
+
+
Defines an instance of the proxy.
+
+
+ VirtualKafkaCluster
+
+
+
Represents a logical Kafka cluster that will be exposed to Kafka clients.
+
+
+ KafkaProxyIngress
+
+
+
Configures how a virtual cluster is exposed on the network to Kafka clients.
+
+
+ KafkaService
+
+
+
Specifies a backend Kafka cluster for a virtual cluster.
+
+
+ KafkaProtocolFilter
+
+
+
Specifies filter mechanisms for use with a virtual cluster.
+
+
+ Secret
+
+
+
KafkaService and KafkaProtocolFilter resources may reference a Secret to provide security-sensitive data such as TLS certificates or passwords.
+
+
+ ConfigMap
+
+
+
KafkaService and KafkaProtocolFilter resources may reference a ConfigMap to provide non-sensitive configuration such as trusted CA certificates.
+
+
+
+
+
+
+
+
Figure 1. Example input resources and the references between them
+
+
+
Based on the input resources, the operator generates the core Kubernetes resources needed to deploy the Kroxylicious Proxy, such as the following:
+
+
+
+
+ ConfigMap
+
+
+
Provides the proxy configuration file mounted into the proxy container.
+
+
+ Deployment
+
+
+
Manages the proxy Pod and container.
+
+
+ Service
+
+
+
Exposes the proxy over the network to other workloads in the same Kubernetes cluster.
+
+
+
+
+
The API is decomposed into multiple custom resources in a similar way to the Kubernetes Gateway API, and for similar reasons. You can make use of Kubernetes Role-Based Access Control (RBAC) to divide responsibility for different aspects of the overall proxy functionality to different roles (people) in your organization.
+
+
+
For example, you might grant networking engineers the ability to configure KafkaProxy and KafkaProxyIngress, while giving application developers the ability to configure VirtualKafkaCluster, KafkaService, and KafkaProtocolFilter resources.
+
+
+
+
+
+
Figure 2. Generated Kubernetes resources and the relationships between them
+
+
+
+
2.2. Custom resource API compatibility
+
+
Kroxylicious custom resource definitions are packaged and deployed alongside the operator. Currently, there’s only a single version of the custom resource APIs: v1alpha1.
+
+
+
Future updates to the operator may introduce new versions of the custom resource APIs. At that time the operator will be backwards compatible with older versions of those APIs and an upgrade procedure will be used to upgrade existing custom resources to the new API version.
+
+
+
+
+
+
3. Installing the Kroxylicious Operator
+
+
+
This section provides instructions for installing the Kroxylicious Operator.
+
+
+
Install the proxy operator by applying the proxy installation files
+
+
+
Installation options and procedures are demonstrated using the example files included with Kroxylicious.
+
+
+
3.1. Install prerequisites
+
+
To install Kroxylicious, you will need the following:
+
+
+
+
+
A Kubernetes 1.31 or later cluster. For development purposes, Minikube may be used.
+
+
+
The kubectl command-line tool to be installed and configured to connect to the running cluster.
+
+
+
+
+
For more information on the tools available for running Kubernetes, see Install Tools in the Kubernetes documentation.
+
+
oc and kubectl commands
+
+
The oc command functions as an alternative to kubectl. In almost all cases the example kubectl commands used in this guide can be done using oc simply by replacing the command name (options and arguments remain the same).
+
+
+
In other words, instead of using:
+
+
+
+
kubectl apply -f your-file
+
+
+
+
when using OpenShift you can use:
+
+
+
+
oc apply -f your-file
+
+
+
+
+
3.2. Kroxylicious release artifacts
+
+
To use YAML manifest files to install Kroxylicious, download kroxylicious-operator-0.21.0.zip or kroxylicious-operator-0.21.0.tar.gz file from the GitHub release page, and extract the files as appropriate (for example using unzip or tar -xzf).
+
+
+
Each of these archives contains:
+
+
+
+
Installation Files
+
+
In the install directory are the YAML manifests needed to install the operator.
+
+
Examples
+
+
In the examples directory are examples of the custom resources which can be used to deploy a proxy once the operator has been installed.
+
+
+
+
+
+
3.3. Installing the operator using installation files
+
+
This procedure shows how to install the Kroxylicious Operator in your Kubernetes cluster.
+
+
+
Prerequisites
+
+
+
You need an account with permission to create and manage CustomResourceDefinition and RBAC (ClusterRole) resources.
+
+
+
You have downloaded the release artifacts and extracted the contents into the current directory.
+
+
+
+
+
Procedure
+
+
+
Edit the Kroxylicious installation files to use the namespace the operator is going to be installed into.
+
+
For example, in this procedure the operator is installed into the namespace my-kroxylicious-operator-namespace.
+
+
+
+
+
+
On Linux, use:
+
+
+
+
+
+
+
+
sed -i 's/namespace: .*/namespace: my-kroxylicious-operator-namespace/' install/*.yaml
+
+
+
+
+ On MacOS, use:
+
+
+
+
+
+
+
+
sed -i '' 's/namespace: .*/namespace: my-kroxylicious-operator-namespace/' install/*.yaml
+
+
+
+
+
+
Deploy the Kroxylicious operator:
+
+
+
kubectl create -f install
+
+
+
+
+
Check the status of the deployment:
+
+
+
kubectl get deployments -n my-kroxylicious-operator-namespace
+
+
+
+
Output shows the deployment name and readiness
+
+
NAME READY UP-TO-DATE AVAILABLE
+kroxylicious-operator 1/1 1 1
+
+
+
+
READY shows the number of replicas that are ready/expected. The deployment is successful when the AVAILABLE output shows 1.
+
+
+
+
+
+
+
+
+
4. Deploying a proxy
+
+
+
Deploy a basic proxy instance with a single virtual cluster exposed to Kafka clients on the same Kubernetes cluster.
+
+
+
4.1. Prerequisites
+
+
+
+
The Kroxylicious Operator is installed in the Kubernetes cluster.
+
+
+
A Kafka cluster is available to be proxied.
+
+
+
TLS certificate generation capability is available for ingress configurations that require TLS.
+
+
+
DNS management access is available for ingress configurations that require off-cluster access.
+
+
+
+
+
+
4.2. The required resources
+
+
4.2.1. Proxy configuration to host virtual clusters
+
+
A KafkaProxy resource represents an instance of the Kroxylicious Proxy. Conceptually, it is the top-level resource that links together KafkaProxyIngress, VirtualKafkaCluster, KafkaService, and KafkaProtocolFilter resources to form a complete working proxy.
+
+
+
KafkaProxy resources are referenced by KafkaProxyIngress and VirtualKafkaCluster resources to define how the proxy is exposed and what it proxies.
An empty spec creates a proxy with default configuration.
+
+
+
+
+
+
4.2.2. Networking configuration
+
+
A KafkaProxyIngress resource defines the networking configuration that allows Kafka clients to connect to a VirtualKafkaCluster.
+
+
+
It is uniquely associated with a single KafkaProxy instance, but it is not uniquely associated with a VirtualKafkaCluster and can be used by multiple VirtualKafkaCluster instances.
+
+
+
The KafkaProxyIngress resource supports the following ingress types to configure networking access to the virtual cluster:
+
+
+
+
+
clusterIP exposes the virtual cluster to applications running inside the same Kubernetes cluster as the proxy.
+
+
+
loadBalancer exposes the virtual cluster to applications running outside the Kubernetes cluster.
+
+
+
openShiftRoute exposes the virtual cluster to applications running outside the OpenShift cluster by using OpenShift Routes (OpenShift only)
+
+
+
+
+
The clusterIP ingress type supports both TCP (plain) and TLS connections. The loadBalancer and openShiftRoute types support only TLS.
+
+
+
When using TLS, you specify a TLS server certificate in the ingress configuration of the VirtualKafkaCluster resource.
+
+
+
When using loadBalancer, changes to your DNS may be required.
+
+
+
The following table summarizes the supported ingress types.
+
+
+
Table 1. Supported ingress types
+
+
+
+
+
+
+
+
+
Ingress Type
+
Use case
+
Supported Transport
+
Requires DNS changes?
+
+
+
+
+
+
clusterIP
+
+
+
On-cluster applications
+
+
+
TCP/TLS
+
+
+
No
+
+
+
+
+
loadBalancer
+
+
+
Off-cluster applications
+
+
+
TLS only
+
+
+
Yes
+
+
+
+
+
openShiftRoute
+
+
+
Off-cluster applications
+
+
+
TLS only
+
+
+
No (hostnames are assigned by OpenShift)
+
+
+
+
+
+
+
+
+
+
TLS is recommended when connecting applications in a production environment.
+
+
+
+
+
+
clusterIP ingress type
+
+
The clusterIP ingress type exposes virtual clusters to Kafka clients running in the same Kubernetes cluster as the proxy. It supports both TCP (plain) and TLS connections.
+
+
+
The clusterIP ingress type uses Kubernetes Service resources of type ClusterIP to enable on-cluster access.
+
+
+
+ Example KafkaProxyIngress configuration for clusterIP with TCP
+
The loadBalancer ingress type allows applications running off-cluster to connect to the virtual cluster. TLS must be used with this ingress type.
+
+
+
The loadBalancer ingress type uses Kubernetes Service resources of type LoadBalancer to enable off-cluster access.
+
+
+
When using a loadBalancer ingress, the proxy uses SNI (Server Name Indication) to match the client’s requested host name to the correct virtual cluster and broker within the proxy. This means that every virtual cluster and every broker within the virtual cluster must be uniquely identifiable within DNS. To accomplish this, the following configuration must be provided:
+
+
+
+
+
A unique bootstrapAddress. This is the address that the clients initially use to connect to the virtual cluster.
+
+
+
An advertisedBrokerAddressPattern that generates unique broker addresses which clients use to connect to individual brokers.
+
+
+
+
+
You decide how to formulate the bootstrapAddress and the advertisedBrokerAddressPattern to best fit the networking conventions of your organization.
+
+
+
The advertisedBrokerAddressPattern must contain the token $(nodeId). The proxy replaces this token with the broker’s node ID. This ensures that client connections are correctly routed to the intended broker.
+
+
+
Both bootstrapAddress and advertisedBrokerAddressPattern may contain the token $(virtualClusterName). If this is present, it is replaced by the virtual cluster’s name. This token is necessary when the KafkaProxyIngress is being shared by many virtual clusters.
+
+
+
One possible scheme is to use the virtual cluster’s name as a subdomain within your organisation’s domain name:
+
+
+
+
$(virtualClusterName).kafkaproxy.example.com
+
+
+
+
You can then use a further subdomain for each broker:
proxyRef identifies the KafkaProxy resource that this ingress is part of.
+
+
+
loadBalancer specifies loadBalancer networking.
+
+
+
bootstrapAddress specifies the bootstrap address for clients to connect to the virtual cluster.
+
+
+
advertisedBrokerAddressPattern specifies the advertised broker address used by the proxy to generate the individual broker addresses presented to the client.
You must also configure DNS so that the bootstrap and broker address resolve from the network used by the applications.
+
+
+
+
openShiftRoute ingress type
+
+
+
+
+
+
OpenShift Routes are available only on OpenShift and are not supported on Kubernetes environments. The Kroxylicious Operator detects whether the OpenShift Route API is available on the Kubernetes server. If it is not available, the operator rejects configurations that use this ingress type.
+
+
+
+
+
+
The openShiftRoute ingress type allows applications running off-cluster to connect to the virtual cluster. TLS must be used with this ingress type.
+
+
+
The openShiftRoute ingress type uses OpenShift Route resources to enable off-cluster access.
+
+
+
When using an openShiftRoute ingress, the proxy uses SNI (Server Name Indication) to match the client’s requested host name to the correct virtual cluster and broker within the proxy.
+
+
+
The bootstrap and broker addresses are generated automatically, for example:
A proxied Kafka cluster is configured in a KafkaService resource, which specifies how the proxy connects to the cluster. The Kafka cluster may or may not be running in the same Kubernetes cluster as the proxy: Network connectivity is all that’s required.
+
+
+
This example shows a KafkaService defining how to connect to a Kafka cluster at kafka.example.com.
bootstrapServers specifies a comma-separated list of addresses in <host>:<port> format. Including multiple broker addresses helps clients connect when one is unavailable.
+
+
+
nodeIdRanges declares the IDs of all the broker nodes in the Kafka cluster
+
+
+
name is optional, but specifying it can make errors easier to diagnose.
+
+
+
start defines the start of the ID range, inclusive.
+
+
+
end defines the end of the ID range, inclusive.
+
+
+
+
+
+
4.2.4. Virtual cluster configuration
+
+
A VirtualKafkaCluster resource defines a logical Kafka cluster that is accessible to clients over the network.
+
+
+
The virtual cluster references the following resources, which must be in the same namespace:
+
+
+
+
+
A KafkaProxy resource that the proxy is part of.
+
+
+
One or more KafkaProxyIngress resources that expose the virtual cluster to Kafka clients and provide virtual-cluster-specific configuration to the ingress (such as TLS certificates and other parameters).
+
+
+
A KafkaService resource that defines the backend Kafka cluster.
+
+
+
One or more KafkaProtocolFilter resources that filter the Kafka protocol traffic between clients and the backend Kafka cluster. The order of the filters in the filterRefs array defines the order that the filters are applied. Client requests pass through the filters starting at index 0, then 1, and continuing to index n. Broker responses pass through the same filters in the reverse order. If the filterRefs array is empty (or null), the traffic will pass through the cluster unchanged.
+
+
+
+
+
The following example shows configuration for a VirtualKafkaCluster that is exposed to Kafka clients running on the same Kubernetes cluster. Protocol traffic is filtered by the my-filter filter.
+
+
+
+ Example VirtualKafkaCluster configuration with a single ingress and filter.
+
proxyRef identifies the KafkaProxy resource that this virtual cluster is part of.
+
+
+
targetKafkaServiceRef specifies the KafkaService that defines the Kafka cluster proxied by the virtual cluster.
+
+
+
ingressRef defines the ingresses that expose the virtual cluster. Each ingress references a KafkaProxyIngress by name.
+
+
+
filterRefs specifies the list of filters applied to Kafka protocol traffic.
+
+
+
+
+
The following example shows configuration for a VirtualKafkaCluster that is exposed to Kafka clients running both on and off the cluster, using TLS. Because TLS is used, the ingress configuration must reference a TLS server certificate. This example has two filters.
+
+
+
+ Example VirtualKafkaCluster configuration with two TLS-enabled ingresses
+
name in the ingress named cluster-ip references a Secret containing the server certificate for the clusterIP ingress.
+
+
+
name in the ingress named load-balancer references a Secret containing the server certificate for the loadBalancer ingress.
+
+
+
+
+
Generating TLS certificates for clusterIP ingress type
+
+
When using the clusterIP ingress type with the TLS protocol, you must provide suitable TLS certificates to secure communication.
+
+
+
The basic steps are as follows:
+
+
+
+
+
Generate a TLS server certificate that covers the service names assigned to the virtual cluster by the ingress.
+
+
+
Provide the certificate to the virtual cluster using a Kubernetes Secret of type kubernetes.io/tls.
+
+
+
+
+
The exact procedure for generating the certificate depends on the tooling and processes used by your organization.
+
+
+
The certificate must meet the following criteria:
+
+
+
+
+
The certificate needs to be signed by a CA that is trusted by the on-cluster applications that connect to the virtual cluster.
+
+
+
The format of the key must be PKCS#8 encoded PEM (Privacy Enhanced Mail). It must not be password protected.
+
+
+
The certificate must use SANs (Subject Alternate Names) to list all service names or use a wildcard TLS certificate that covers them all. Assuming a virtual cluster name of my-cluster, an ingress name of cluster-ip, and a Kafka cluster using node IDs (0-2), the following SANs must be listed in the certificate:
<secret-name> is the name of the secret to be created, <namespace> is the name of the namespace where the proxy is to be deployed, and <path/to/cert/file> and <path/to/key/file> are the paths to the certificate and key files.
+
+
+
+
Generating TLS certificates for loadBalancer ingress type
+
+
When using loadBalancer ingress type, you must provide suitable TLS certificates to secure communication.
+
+
+
The basic steps are as follows:
+
+
+
+
+
Generate a TLS server certificate that covers the bootstrap and broker names assigned to the virtual cluster by the ingress.
+
+
+
Provide the certificate to the virtual cluster using a Kubernetes Secret of type kubernetes.io/tls.
+
+
+
+
+
The exact procedure for generating the certificate depends on the tooling and processes used by your organization.
+
+
+
The certificate must meet the following criteria:
+
+
+
+
+
The certificate needs to be signed by a CA that is trusted by the off-cluster applications that connect to the virtual cluster.
+
+
+
The format of the key must be PKCS#8 encoded PEM (Privacy Enhanced Mail). It must not be password protected.
+
+
+
The certificate must use SANs (Subject Alternate Names) to list the bootstrap and all the broker names or use a wildcard TLS certificate that covers them all. Assuming a bootstrapAddress of $(virtualClusterName).kafkaproxy.example.com, an advertisedBrokerAddressPattern of broker-$(nodeId).$(virtualClusterName).kafkaproxy.example.com, a Kafka cluster using node IDs (0-2), and a virtual cluster name of my-cluster, the following SANs must be listed in the certificate:
<secret-name> is the name of the secret to be created, <namespace> is the name of the namespace where the proxy is to be deployed, and <path/to/cert/file> and <path/to/key/file> are the paths to the certificate and key files.
+
+
+
+
Configuring DNS for load balancer ingress
+
+
When using the loadBalancer ingress type, you must ensure that both the bootstrapAddress and the names generated from advertisedBrokerAddressPattern resolve to the external address of the Kubernetes Service underlying the load balancer on the network where the off-cluster applications run.
+
+
+
Prerequisites
+
+
+
The Kroxylicious Operator is installed.
+
+
+
KafkaProxy, VirtualKafkaCluster, and KafkaProxyIngress resources are deployed.
+
+
+
The VirtualKafkaCluster and KafkaProxyIngress resources are configured to use a loadBalancer ingress.
+
+
+
DNS can be configured on the network where the off-cluster applications run.
+
+
+
Network traffic can flow from the network where the applications run to the external addresses provided by the Kubernetes cluster.
+
+
+
+
+
Procedure
+
+
+
If using Minikube as your Kubernetes environment, enable the Minikube Load Balancer tunnel by running the following command. Use a separate console window to do this as the command needs to stay running for the tunnel to work.
+
+
+
minikube tunnel
+
+
+
+
+
Run the following command to discover the external address being used by the load balancer:
+
+
+
kubectl get virtualkafkacluster -n <namespace> <virtual-cluster-name> -o=jsonpath='{.status.ingresses[?(@.name == "<ingress-name>")].loadBalancerIngressPoints}'
+
+
+
+
Replace <namespace> with the name of the Kubernetes namespace where the resources are deployed, replace <ingress-name> with the name of the KafkaProxyIngress resource, and replace <virtual-cluster-name> with the name of the VirtualKafkaCluster resource.
+
+
+
Depending on your Kubernetes environment, the command returns an object containing an IP address or a hostname. This is the external address of the load balancer.
+
+
+
+
Configure your DNS so that the bootstrap and broker names resolve to the external address.
+
+
Assuming a bootstrapAddress of $(virtualClusterName).kafkaproxy.example.com, an advertisedBrokerAddressPattern of broker-$(nodeId).$(virtualClusterName).kafkaproxy.example.com, a Kafka cluster using node IDs (0-2), and a virtual cluster name of my-cluster, the following DNS mappings are listed:
Generating TLS certificates for openshiftRoute ingress type
+
+
When using the openshiftRoute ingress type, you must provide suitable TLS certificates to secure communication.
+
+
+
The basic steps are as follows:
+
+
+
+
+
Generate a TLS server certificate that covers the bootstrap and broker names assigned to the virtual cluster by the ingress.
+
+
+
Provide the certificate to the virtual cluster using a Kubernetes Secret of type kubernetes.io/tls.
+
+
+
+
+
The exact procedure for generating the certificate depends on the tooling and processes used by your organization.
+
+
+
The certificate must meet the following criteria:
+
+
+
+
+
The certificate needs to be signed by a CA that is trusted by the off-cluster applications that connect to the virtual cluster.
+
+
+
The format of the key must be PKCS#8 encoded PEM (Privacy Enhanced Mail). It must not be password protected.
+
+
+
The certificate must use SANs (Subject Alternate Names) to list the bootstrap and all the broker names or use a wildcard TLS certificate that covers them all. Assuming a bootstrapAddress of $(virtualClusterName)-bootstrap.ingress-domain:443, an advertisedBrokerAddressPattern of $(virtualClusterName)-$(nodeId).ingress-domain:443, a Kafka cluster using node IDs (0-2), and a virtual cluster name of my-cluster, the following SANs must be listed in the certificate:
<secret-name> is the name of the secret to be created, <namespace> is the name of the namespace where the proxy is to be deployed, and <path/to/cert/file> and <path/to/key/file> are the paths to the certificate and key files.
+
+
+
+
+
+
4.3. Filters
+
+
A KafkaProtocolFilter resource represents a Kroxylicious Proxy filter. It is not uniquely associated with a VirtualKafkaCluster or KafkaProxy instance; it can be used in a number of VirtualKafkaCluster instances in the same namespace.
+
+
+
A KafkaProtocolFilter is similar to one of the items in a proxy configuration’s filterDefinitions:
+
+
+
+
+
The resource’s metadata.name corresponds directly to the name of a filterDefinitions item.
+
+
+
The resource’s spec.type corresponds directly to the type of a filterDefinitions item.
+
+
+
The resource’s spec.configTemplate corresponds to the config of a filterDefinitions item, but is subject to interpolation by the operator.
+
+
+
+
+
+
+
+
5. Operating a proxy
+
+
+
Monitor the operational status of the proxy and configure resource usage. This section explains how to check the status of the KafkaProxyIngress and VirtualKafkaCluster resources, and how to set CPU and memory requests and limits for the proxy container.
+
+
+
This section assumes you have a running Kroxylicious Proxy instance.
+
+
+
5.1. Checking the status of the VirtualKafkaCluster resource
+
+
The status of a VirtualKafkaCluster resource provides feedback on its configuration through a set of conditions. These include the ResolvedRefs condition, which indicates whether all referenced resources exist, and the Accepted condition, which indicates whether the cluster’s configuration was successfully applied to the proxy.
+
+
+
5.1.1. ResolvedRefs conditions
+
+
When you create a VirtualKafkaCluster, the operator checks whether the following exist:
+
+
+
+
+
A KafkaProxy matching spec.proxyRef.
+
+
+
Each KafkaProxyIngress specified in spec.ingresses, and whether they refer to the same KafkaProxy as the virtual cluster.
+
+
+
A Secret referred to in the tls property.
+
+
+
+
+
The result is reported in status.conditions with a ResolvedRefs condition accordingly.
+
+
+
+ Example VirtualKafkaCluster status when all referenced resources exist
+
The status.observedGeneration in the status matches the metadata.generation, indicating that the status is up-to-date for the latest spec.
+
+
+
The conditions array reports any issues with the resource.
+
+
+
+
ResolvedRefs is the condition type used to report any issues with referenced resources.
+
+
+
status value of the condition. If True, all referenced resources resolve correctly. False indicates there are issues.
+
+
+
+
+
+
+
+
A status value of False means that one or more of the referenced resources is missing. In this case, the condition includes reason and message properties with more details.
+
+
+
+
5.1.2. Accepted conditions
+
+
When a VirtualKafkaCluster has a valid spec, the operator attempts to configure the proxy instance accordingly. This might not be possible. For example, the spec may be valid but incompatible with other virtual clusters running in the same proxy instance.
+
+
+
The operator sets a condition type of Accepted in status.conditions to indicate whether or not a virtual cluster has been successfully configured within a proxy instance.
+
+
+
+
+
5.2. Checking the status of the KafkaProxyIngress resource
+
+
The status of a KafkaProxyIngress resource provides feedback on its configuration through a set of conditions. These include the ResolvedRefs condition, which indicates whether all referenced resources exist.
+
+
+
When you create a KafkaProxyIngress, the operator checks whether a KafkaProxy corresponding to the spec.proxyRef exists. The result is reported in status.conditions with a ResolvedRefs condition accordingly.
+
+
+
+ Example KafkaProxyIngress status when spec.proxyRef exists
+
The status.observedGeneration in the status matches the metadata.generation, indicating that the status is up-to-date for the latest spec.
+
+
+
The conditions array reports any issues with the resource.
+
+
+
+
ResolvedRefs is the condition type used to report any issues with referenced resources.
+
+
+
status value of the condition. If True, all referenced resources resolve correctly. False indicates there are issues.
+
+
+
+
+
+
+
+
A status value of False means that the KafkaProxy resource is missing. In this case, the condition includes reason and message properties with more details.
+
+
+
+
5.3. Configuring Proxy container CPU and memory resource limits and requests
+
+
When you define a KafkaProxy resource, a number of Kubernetes Pods are created, each with a proxy container. Each of these containers runs a single Kroxylicious Proxy process.
+
+
+
By default, these proxy containers are defined without resource limits. To manage CPU and memory consumption in your environment, modify the proxyContainer section within your KafkaProxy specification.
+
+
+
+ Example KafkaProxy configuration with proxy container resource specification
+
Secure proxies by using TLS and storing sensitive values in external resources.
+
+
+
6.1. Prerequisites
+
+
+
+
A running Kroxylicious Proxy instance
+
+
+
+
+
+
6.2. Securing the client-to-proxy connection
+
+
Secure client-to-proxy communications using TLS.
+
+
+
6.2.1. TLS configuration for client-to-proxy connections
+
+
This example shows a VirtualKafkaCluster, exposing it to Kafka clients running on the same Kubernetes cluster. It uses TLS as the transport protocol so that communication between Kafka clients and the proxy is encrypted.
proxyRef identifies the KafkaProxy resource that this virtual cluster is part of. It must be in the same namespace as the VirtualKafkaCluster.
+
+
+
targetKafkaServiceRef identifies the KafkaService that is proxied by this virtual cluster. It must be in the same namespace as the VirtualKafkaCluster.
+
+
+
ingresses defines how traffic flows into the virtual cluster.
+
+
+
+
ingressRef references a KafkaProxyIngress. A virtual cluster can be exposed by one or more ingresses, each of which must exist in the same namespace as the VirtualKafkaCluster.
+
+
+
If the referenced ingress requires TLS, the tls property must provide a TLS server certificate to use.
+
+
+
+
+
+
+
+
+ Example KafkaProxyIngress configuration for TLS
+
proxyRef identifies the KafkaProxy that this ingress belongs to. It must be in the same namespace as the KafkaProxyIngress.
+
+
+
clusterIP exposes the proxy to Kafka clients inside the same Kubernetes cluster using a ClusterIP service.
+
+
+
+
protocol specifies the transport protocol used by the ingress. In this example, it is set to TLS.
+
+
+
+
+
+
+
+
+
6.2.2. Mutual TLS configuration for client-to-proxy connections
+
+
You can configure a virtual cluster ingress to request or require Kafka clients to authenticate to the proxy using TLS. This configuration is known as mutual TLS (mTLS), because both the client and the proxy authenticate each other using TLS.
+
+
+
+ Example VirtualKafkaCluster configuration requiring clients to present a trusted certificate provided from a ConfigMap
+
trustAnchorRef references a ConfigMap resource containing the CA certificates to trust.
+
+
+
+
kind is optional and defaults to ConfigMap if omitted.
+
+
+
+
+
+
tlsClientAuthentication specifies whether client authentication is required (REQUIRED), requested (REQUESTED), or disabled (NONE). If a trustAnchorRef is specified, the default is REQUIRED.
+
+
+
+
+
+ Example VirtualKafkaCluster configuration requiring clients to present a trusted certificate provided from a Secret
+
trustAnchorRef references a Secret resource containing the CA certificates to trust.
+
+
+
+
kind must specify Secret.
+
+
+
+
+
+
+
+
+
6.2.3. TLS version configuration for client-to-proxy connections
+
+
Some older versions of TLS (and SSL before it) are now considered insecure. These versions remain enabled by default in order to maximize interoperability between TLS clients and servers that only support older versions.
+
+
+
If the Kafka cluster that you want to connect to supports newer TLS versions, you can disable the proxy’s support for older, insecure versions. For example, if the Kafka cluster supports TLSv1.1, TLSv1.2 and TLSv1.3 you might choose to enable only TLSv1.3 support. This would reduce the susceptibility to a TLS downgrade attack.
+
+
+
+
+
+
+
It is good practice to disable insecure protocol versions.
+
+
+
+
+
+
You can restrict which TLS protocol versions the proxy supports for client-to-proxy connections by configuring the protocols property.
+
+
+
+ Example VirtualKafkaCluster with restricted TLS protocol versions
+
6.2.4. TLS cipher suite configuration for client-to-proxy connections
+
+
A cipher suite is a set of cryptographic algorithms that together provide the security guarantees offered by TLS. During TLS negotiation, a server and client agree on a common cipher suite that they both support.
+
+
+
Some older cipher suites are now considered insecure, but may be enabled on the Kafka cluster to allow older clients to connect.
+
+
+
The cipher suites enabled by default in the proxy depend on the JVM used in the proxy image and the TLS protocol version that is negotiated.
+
+
+
To prevent TLS downgrade attacks, you can disable cipher suites known to be insecure or no longer recommended. However, the proxy and the cluster must support at least one cipher suite in common.
+
+
+
+
+
+
+
It is good practice to disable insecure cipher suites.
+
+
+
+
+
+
You can restrict which TLS cipher suites the proxy uses when negotiating client-to-proxy connections by configuring the cipherSuites property.
+
+
+
+ Example VirtualKafkaCluster configuration using cipherSuites to allow specific ciphers
+
cipherSuites configures the cipher suites used by the proxy.
+
+
+
+
allowed lists the cipher suites which are permitted, in preference order.
+
+
+
+
+
+
+
+
Alternatively, you can use denied to specify cipher suites to exclude. The names of the cipher suites supported depend on the JVM in the proxy container image.
6.3.1. TLS trust configuration for proxy-to-cluster connections
+
+
By default, the proxy uses the platform’s default trust store when connecting to the proxied cluster over TLS. This works if the cluster’s TLS certificates are signed by a well-known public Certificate Authority (CA), but fails if they’re signed by a private CA instead.
+
+
+
+
+
+
+
It is good practice to configure trust explicitly, even when the proxied cluster’s TLS certificates are signed by a public CA.
+
+
+
+
+
+
This example configures a KafkaService to trust TLS certificates signed by any Certificate Authority (CA) listed in the trusted-cas.pem entry of the ConfigMap named trusted-cas.
+
+
+
+ Example KafkaService configuration for trusting certificates provided by a ConfigMap
+
name is the name of the Secret providing the CA certificates. The resource must exist in the same namespace as the KafkaService
+
+
+
kind must be Secret.
+
+
+
key identifies the entry in the resource. The value must be a PEM-encoded set of CA certificates.
+
+
+
+
+
+
6.3.2. TLS authentication to proxied Kafka clusters
+
+
Some Kafka clusters require mutual TLS (mTLS) authentication. You can configure the proxy to present a TLS client certificate using the KafkaService resource.
+
+
+
The TLS client certificate you provide must have been issued by a Certificate Authority (CA) that’s trusted by the proxied cluster.
+
+
+
This example configures a KafkaService to use a TLS client certificate stored in a Secret named tls-cert-for-kafka.example.com.
+
+
+
+ Example KafkaService configuration with TLS client authentication.
+
certificateRef identifies the TLS client certificate to use.
+
+
+
+
kind is optional and defaults to Secret. The Secret should have type: kubernetes.io/tls.
+
+
+
name is the name of the resource of the given kind. This resource must exist in the same namespace as the KafkaService
+
+
+
+
+
+
+
+
+
6.3.3. TLS version configuration for proxy-to-cluster connections
+
+
Some older versions of TLS (and SSL before it) are now considered insecure. These versions remain enabled by default in order to maximize interoperability between TLS clients and servers that only support older versions.
+
+
+
If the Kafka cluster that you want to connect to supports newer TLS versions, you can disable the proxy’s support for older, insecure versions. For example, if the Kafka cluster supports TLSv1.1, TLSv1.2 and TLSv1.3 you might choose to enable only TLSv1.3 support. This would reduce the susceptibility to a TLS downgrade attack.
+
+
+
+
+
+
+
It is good practice to disable insecure protocol versions.
+
+
+
+
+
+
This example configures a KafkaService to allow only TLS v1.3 when connecting to kafka.example.com.
+
+
+
+ Example KafkaService with restricted TLS protocol versions.
+
The protocols property also supports denied, if you prefer to list the versions to exclude instead. The names of the TLS protocol versions supported depend on the JVM in the proxy container image.
6.3.4. TLS cipher suite configuration for proxy-to-cluster connections
+
+
A cipher suite is a set of cryptographic algorithms that together provide the security guarantees offered by TLS. During TLS negotiation, a server and client agree on a common cipher suite that they both support.
+
+
+
Some older cipher suites are now considered insecure, but may be enabled on the Kafka cluster to allow older clients to connect.
+
+
+
The cipher suites enabled by default in the proxy depend on the JVM used in the proxy image and the TLS protocol version that is negotiated.
+
+
+
To prevent TLS downgrade attacks, you can disable cipher suites known to be insecure or no longer recommended. However, the proxy and the cluster must support at least one cipher suite in common.
+
+
+
+
+
+
+
It is good practice to disable insecure cipher suites.
+
+
+
+
+
+
+ Example KafkaService configured so the proxy negotiates TLS connections using only the listed cipher suites
+
The cipherSuites property also supports denied, if you prefer to list the cipher suites to exclude instead. The names of the cipher suites supported depend on the JVM in the proxy container image.
Secure filters by using the security features provided by each filter and storing sensitive values in external resources such as a Kubernetes Secret.
+
+
+
6.4.1. Security-sensitive values in filter resources
+
+
Filter resources can be configured to handle security-sensitive values like passwords or keys by referencing Kubernetes Secret and ConfigMap resources.
+
+
+
Template use and value interpolation
+
+
Interpolation is supported in spec.configTemplate for the automatic substitution of placeholder values at runtime. This allows security-sensitive values, such as passwords or keys, to be specified in Kubernetes Secret resources rather than directly in the KafkaProtocolFilter resource. Likewise, things like trusted CA certificates can be defined in ConfigMap resources.
+
+
+
The operator determines which Secret and ConfigMap resources are referenced by a KafkaProtocolFilter resource and declares them as volumes in the proxy Pod, mounted into the proxy container. This example shows how to configure the RecordEncryptionFilter using a Vault KMS deployed in the same Kubernetes cluster.
type is the filter class name of the Record Encryption filter. If the unqualified name is ambiguous, it must be qualified by the filter package name.
+
+
+
The KafkaProtocolFilter requires a configTemplate, which supports interpolation references.
+
+
+
The password uses an interpolation reference, enclosed by ${ and } instead of a literal value. The operator supplies the value at runtime from the specified Secret.
+
+
+
The selector template is interpreted by the proxy. It uses different delimiters, $( and ), than the interpolation reference.
+
+
+
+
+
+
Structure of interpolation references
+
+
Let’s look at the example interpolation reference ${secret:vault:token} in more detail.
+
+
+
It starts with ${ and ends with }. Between these, it is broken into three parts, separated by colons (:):
+
+
+
+
+
secret is a provider. Supported providers are secret and configmap (note the use of lower case).
+
+
+
vault is a path. The interpretation of the path depends on the provider.
+
+
+
token is a key. The interpretation of the key also depends on the provider.
+
+
+
+
+
For both secret and configmap providers:
+
+
+
+
+
The path is interpreted as the name of a Secret or ConfigMap resource in the same namespace as the KafkaProtocolFilter resource.
+
+
+
The key is interpreted as a key in the data property of the Secret or ConfigMap resource.
+
+
+
+
+
+
+
+
+
+
7. Advanced proxy tuning
+
+
+
Configure advanced network and connection settings for the proxy.
+
+
+
7.1. Advanced network tuning for a proxy
+
+
The spec.network section of a KafkaProxy resource provides low-level tuning options for the proxy listener and management HTTP server. The defaults are suitable for most deployments — only configure these settings if you have a specific operational reason to do so.
+
+
+
+ Example KafkaProxy with network tuning applied
+
spec.network.proxy configures network tuning for the proxy listener that handles Kafka client connections. All fields are optional.
+
+
+
workerThreadCount is the number of threads available to process requests across client connections. Each connection is pinned to one thread, but a single thread can serve many connections. More threads increase parallelism but also CPU consumption. Tune this in conjunction with the pod’s CPU limits and validate under realistic load. Defaults to twice the number of available processors.
+
+
+
shutdownQuietPeriod is the grace period during which the proxy continues to accept and complete in-flight requests before shutting down. If no new requests arrive during this window, shutdown proceeds. Defaults to 2s if not specified.
+
+
+
shutdownTimeout is the maximum time allowed for the proxy to complete shutdown, including the quiet period. If shutdown does not complete within this period, it is forced. Defaults to 15s if not specified. Set this to a value less than the pod’s terminationGracePeriodSeconds (Kubernetes default: 30s) to ensure the proxy can finish gracefully before Kubernetes forcibly terminates the pod.
+
+
+
spec.network.management configures network tuning for the management HTTP server that serves metrics and health endpoints. Supports the same settings as the proxy listener, and can be configured independently.
+
+
+
+
+
Duration values use a string-based Go-style duration format (for example, 30s, 5m). Supported units are: m (minutes), s (seconds), ms (milliseconds), μs or us (microseconds), and ns (nanoseconds). Units can be combined, for example 1m30s.
+
+
+
+
7.2. Configuring idle connection timeouts
+
+
The proxy can automatically disconnect idle client connections to reclaim resources. Idle timeout configuration is optional and disabled by default. Enable it only when required for your deployment. Idle timeouts are configured under spec.network.proxy in the KafkaProxy resource and apply only to the proxy listener — they are not available for the management HTTP server. When enabled, idle disconnects are observable via the kroxylicious_client_to_proxy_disconnects_total metric.
+
+
+
7.2.1. When to enable idle timeouts
+
+
Consider enabling idle timeouts in the following scenarios:
+
+
+
+
+
Security posture: unauthenticated connections can be closed quickly to limit the window for abuse, while authenticated connections get a more generous timeout.
+
+
+
Unclosed connections: clients that abandon connections without properly closing them, leaving resources allocated unnecessarily.
+
+
+
Network infrastructure requirements: in environments where firewalls or load balancers drop idle connections, configure the proxy to disconnect gracefully first.
+
+
+
+
+
+
7.2.2. When not to enable idle timeouts
+
+
Avoid enabling idle timeouts in the following scenarios:
+
+
+
+
+
Legitimate idle connections: applications that maintain long-lived connections with extended idle periods, such as consumers with long poll timeouts or applications using connection pooling.
+
+
+
Stable network infrastructure: environments with reliable network infrastructure and no issues with idle connection management.
+
+
+
+
+
+
7.2.3. How idle timeouts work
+
+
The proxy supports two independent timeout settings that apply at different stages of the connection lifecycle:
+
+
+
+
+
Unauthenticated timeout (unauthenticatedIdleTimeout): applies to connections where the proxy has not yet detected completed authentication. The proxy considers authentication complete if either of the following is true:
+
+
+
+
A transport subject builder (a component that extracts an authenticated identity from transport-layer attributes) creates a subject with an identity (for example, from a client TLS certificate).
+
+
+
A SASL inspection or termination filter invokes io.kroxylicious.proxy.filter.FilterContext.clientSaslAuthenticationSuccess.
+
+
+
+
+
+
Authenticated timeout (authenticatedIdleTimeout): applies to connections where an identity has been established, for the remainder of the connection’s lifetime.
+
+
+
+
+
+
+
+
+
+
+
For the proxy to detect authentication completion, you must configure either TLS client certificate authentication or a SASL inspection or termination filter. Without one of these, all connections remain in the unauthenticated state for their entire lifetime, and authenticatedIdleTimeout has no effect. For more information, see the SASL inspection filter guide.
+
+
+
+
+
+
+
+
Both timeout settings are optional and have no default values. You can configure one, both, or neither depending on your requirements. Timeout values use a string-based duration format, following Go conventions (for example, 30s, 5m). Supported units are: h (hours), m (minutes), s (seconds), ms (milliseconds), μs or us (microseconds), and ns (nanoseconds). Units can be combined, for example 1m30s.
+
+
+
+
7.2.4. Configuration examples
+
+
+ Example KafkaProxy configuration with unauthenticated and authenticated idle timeouts
+
unauthenticatedIdleTimeout Disconnect unauthenticated connections after 30 seconds of inactivity. If omitted, or zero, no idle timeout is applied.
+
+
+
authenticatedIdleTimeout Disconnect authenticated connections after 10 minutes of inactivity. If omitted, or zero, no idle timeout is applied.
+
+
+
+
+
+
7.2.5. Monitoring idle disconnects
+
+
The proxy tracks idle disconnects using the kroxylicious_client_to_proxy_disconnects_total metric with cause="idle_timeout". This counter increments each time a connection is closed after exceeding the configured idle timeout.
+
+
+
The kroxylicious_client_to_proxy_disconnects_total metric also tracks other disconnect scenarios:
+
+
+
+
+
cause="idle_timeout" - Connection exceeded the configured idle timeout duration
+
+
+
cause="client_closed" - The downstream client initiated the connection close
+
+
+
cause="server_closed" - The upstream node closed the connection, causing the proxy to close the client connection
Kroxylicious supports key observability features to help you understand the performance and health of your proxy instances.
+
+
+
The Kroxylicious Proxy and Kroxylicious Operator generate metrics for real-time monitoring and alerting, as well as logs that capture their actions and behavior. You can integrate these metrics with a monitoring system like Prometheus for ingestion and analysis, while configuring log levels to control the granularity of logged information.
+
+
+
8.1. Overview of proxy metrics
+
+
The proxy provides metrics for both connections and messages. These metrics are categorized into downstream (client-side) and upstream (broker-side) groups. They allow users to assess the impact of the proxy and its filters on their Kafka system.
+
+
+
+
+
Connection metrics count the connections made from the downstream (incoming connections from the clients) and the connections made by the proxy to the upstream (outgoing connections to the Kafka brokers).
Connection metrics count the TCP connections made from the client to the proxy (kroxylicious_client_to_proxy_connections_total) and from the proxy to the broker (kroxylicious_proxy_to_server_connections_total). These metrics count connection attempts, so the connection count is incremented even if the connection attempt ultimately fails.
+
+
+
In addition to the count metrics, there are active connection gauge metrics that track the current number of open connections, and error metrics.
+
+
+
+
+
If an error occurs whilst the proxy is accepting a connection from the client the kroxylicious_client_to_proxy_errors_total metric is incremented by one.
+
+
+
If an error occurs whilst the proxy is attempting a connection to a broker the kroxylicious_proxy_to_server_errors_total metric is incremented by one.
+
+
+
+
+
Connection and connection error metrics include the following labels: virtual_cluster (the virtual cluster’s name) and node_id (the broker’s node ID). When the client connects to the bootstrap endpoint of the virtual cluster, a node ID value of bootstrap is recorded.
+
+
+
The kroxylicious_client_to_proxy_errors_total metric also counts connection errors that occur before a virtual cluster has been identified. For these specific errors, the virtual_cluster and node_id labels are set to an empty string ("").
+
+
+
+
+
+
+
Error conditions signaled within the Kafka protocol response (such as RESOURCE_NOT_FOUND or UNKNOWN_TOPIC_ID) are not classed as errors by these metrics.
+
+
+
+
+
+
Understanding connection metrics relationships
+
+
The proxy provides several related metrics for tracking connections:
+
+
+
+
+
Connection counters (kroxylicious_*_connections_total) track the total number of connection attempts over time. These values only increase and provide a historical view of connection activity.
+
+
+
Active connection gauges (kroxylicious_*_active_connections) show the current number of open connections at any given moment. These values increase when connections are established and decrease when connections are closed.
+
+
+
Error counters (kroxylicious_*_errors_total) track connections that closed due to errors.
+
+
+
Disconnect counters (kroxylicious_client_to_proxy_disconnects_total) track connections that closed without errors, categorized by cause.
+
+
+
+
+
When a connection closes, it increments either the error counter or one of the disconnect counter causes, but never both. The active connection gauge decreases regardless of whether the closure was due to an error or a clean disconnect.
+
+
+
The following relationship holds:
+
+
+
Active connections = Connections total - (Errors total + sum of all Disconnect causes)
+
+
+
Table 2. Connection metrics for client and broker interactions
+
+
+
+
+
+
+
+
+
Metric Name
+
Type
+
Labels
+
Description
+
+
+
+
+
+
kroxylicious_client_to_proxy_connections_total
+
+
+
Counter
+
+
+
virtual_cluster, node_id
+
+
+
Incremented by one every time a connection is accepted from a client by the proxy.
+
+ This metric counts all connection attempts that reach the proxy, even those that end in error.
+
+
+
+
+
kroxylicious_client_to_proxy_errors_total
+
+
+
Counter
+
+
+
virtual_cluster, node_id
+
+
+
Incremented by one every time a connection is closed due to any downstream error.
+
+
+
+
+
kroxylicious_proxy_to_server_connections_total
+
+
+
Counter
+
+
+
virtual_cluster, node_id
+
+
+
Incremented by one every time a connection is made to the server from the proxy.
+
+ This metric counts all connections attempted to the broker, even those that end in error.
+
+
+
+
+
kroxylicious_proxy_to_server_errors_total
+
+
+
Counter
+
+
+
virtual_cluster, node_id
+
+
+
Incremented by one every time a connection is closed due to any upstream error.
+
+
+
+
+
kroxylicious_client_to_proxy_active_connections
+
+
+
Gauge
+
+
+
virtual_cluster, node_id
+
+
+
Shows the current number of active TCP connections from clients to the proxy.
+
+ This gauge reflects real-time connection state and decreases when connections are closed.
+
+
+
+
+
kroxylicious_proxy_to_server_active_connections
+
+
+
Gauge
+
+
+
virtual_cluster, node_id
+
+
+
Shows the current number of active TCP connections from the proxy to servers.
+
+ This gauge reflects real-time connection state and decreases when connections are closed.
+
+
+
+
+
kroxylicious_client_to_proxy_disconnects_total
+
+
+
Counter
+
+
+
virtual_cluster, node_id, cause
+
+
+
Incremented by one every time a client connection is closed by the proxy. The cause label indicates the reason for disconnection:
+
+ idle_timeout - Connection exceeded the configured idle timeout duration (requires idle timeouts configured via network.proxy.unauthenticatedIdleTimeout or network.proxy.authenticatedIdleTimeout).
+
+ client_closed - Client initiated the connection close.
+
+ server_closed - Backend server closed the connection, causing the proxy to close the client connection.
+
+ Note: Error-based disconnects are tracked separately via kroxylicious_client_to_proxy_errors_total, not this metric.
+
+
+
+
+
+
+
+
8.1.2. Message metrics
+
+
Message metrics count and record the sizes of the Kafka protocol requests and responses that flow through the proxy.
+
+
+
Use these metrics to help understand:
+
+
+
+
+
the number of messages flowing through the proxy.
+
+
+
the overall volume of data through the proxy.
+
+
+
the effect the filters are having on the messages.
+
+
+
Downstream metrics
+
+
+
+
kroxylicious_client_to_proxy_request_total counts requests as they arrive from the client.
+
+
+
kroxylicious_proxy_to_client_response_total counts responses as they are returned to the client.
+
+
+
kroxylicious_client_to_proxy_request_size_bytes is incremented by the size of each request as it arrives from the client.
+
+
+
kroxylicious_proxy_to_client_response_size_bytes is incremented by the size of each response as it is returned to the client.
+
+
+
+
+
+
Upstream metrics
+
+
+
+
kroxylicious_proxy_to_server_request_total counts requests as they go to the broker.
+
+
+
kroxylicious_server_to_proxy_response_total counts responses as they are returned by the broker.
+
+
+
kroxylicious_proxy_to_server_request_size_bytes is incremented by the size of each request as it goes to the broker.
+
+
+
kroxylicious_server_to_proxy_response_size_bytes is incremented by the size of each response as it is returned by the broker.
+
+
+
+
+
+
+
+
The size recorded is the encoded size of the protocol message. It includes the 4 byte message size.
+
+
+
Filters can alter the flow of messages through the proxy or the content of the message. This is apparent through the metrics.
+
+
+
+
+
If a filter sends a short-circuit, or closes a connection the downstream message counters will exceed the upstream counters.
+
+
+
If a filter changes the size of the message, the downstream size metrics will be different to the upstream size metrics.
+
+
+
+
+
+
+
+
Figure 3. Downstream and upstream message metrics in the proxy
+
+
+
Message metrics include the following labels: virtual_cluster (the virtual cluster’s name), node_id (the broker’s node ID), api_key (the message type), api_version, and decoded (a flag indicating if the message was decoded by the proxy).
+
+
+
When the client connects to the bootstrap endpoint of the virtual cluster, metrics are recorded with a node ID value of bootstrap.
+
+
+
Table 3. Kafka message metrics for proxy request and response flow
Incremented by the size of the message each time a response goes from the proxy to a client.
+
+
+
+
+
+
+
+
8.2. Overview of operator metrics
+
+
The Kroxylicious Operator is implemented using the Java Operator SDK. The Java Operator SDK exposes metrics that allow its behavior to be understood. These metrics are enabled by default in the Kroxylicious Operator.
Metrics from the Kroxylicious Proxy and Kroxylicious Operator can be ingested into your Prometheus instance. The proxy and the operator each expose an HTTP endpoint for Prometheus metrics at the /metrics address. The endpoint does not require authentication.
+
+
+
For the Proxy, the port that exposes the scrape endpoint is named management. For the Operator, the port is named http.
+
+
+
Prometheus can be configured to ingest the metrics from the scrape endpoints.
+
+
+
This guide assumes you are using the Prometheus Operator to configure Prometheus.
+
+
+
8.3.1. Ingesting operator metrics
+
+
This procedure describes how to ingest metrics from the Kroxylicious Operator into Prometheus.
The Prometheus Operator reconfigures Prometheus automatically. Prometheus begins regularly scraping the operator’s metrics.
+
+
+
+
Check the metrics are being ingested using a PromQL query such as:
+
+
+
kroxylicious_build_info
+
+
+
+
+
+
+
+
+
8.4. Setting log levels
+
+
You can independently control the logging level of both the Kroxylicious Operator and the Kroxylicious Proxy.
+
+
+
In both cases, logging levels are controlled using two environment variables:
+
+
+
+
+
KROXYLICIOUS_APP_LOG_LEVEL controls the logging of the application (io.kroxylicious loggers). It defaults to INFO.
+
+
+
KROXYLICIOUS_ROOT_LOG_LEVEL controls the logging level at the root. It defaults to WARN.
+
+
+
+
+
When trying to diagnose a problem, start first by raising the logging level of KROXYLICIOUS_APP_LOG_LEVEL. If more detailed diagnostics are required, try raising the KROXYLICIOUS_ROOT_LOG_LEVEL. Both the proxy and operator use Apache Log4J2 and use logging levels understood by it: TRACE, DEBUG, INFO, WARN, and ERROR.
+
+
+
+
+
+
+
Running the operator or the proxy at elevated logging levels, such as DEBUG or TRACE, can generate a large volume of logs, which may consume significant storage and affect performance. Run at these levels only as long as necessary.
+
+
+
+
+
+
8.4.1. Overriding proxy logging levels
+
+
This procedure describes how to override the logging level of the Kroxylicious Proxy.
+
+
+
Prerequisites
+
+
+
An instance of Kroxylicious deployed by the Kroxylicious Operator.
+
+
+
+
+
Procedure
+
+
+
Apply the KROXYLICIOUS_APP_LOG_LEVEL or KROXYLICIOUS_ROOT_LOG_LEVEL environment variable to the proxy’s Kubernetes Deployment resource:
+
+
+
kubectl set env -n <namespace> deployment <deployment_name> KROXYLICIOUS_APP_LOG_LEVEL=DEBUG
+
+
+
+
The Deployment resource has the same name as the KafkaProxy.
+
+
+
Kubernetes recreates the proxy pod automatically.
+
+
+
+
Verify that the new logging level has taken effect:
8.4.2. Overriding the operator logging level (operator installed by bundle)
+
+
This procedure describes how to override the logging level of the Kroxylicious Operator. It applies when the operator was installed from the YAML bundle.
+
+
+
Prerequisites
+
+
+
Kroxylicious Operator installed from the YAML bundle.
+
+
+
+
+
Procedure
+
+
+
Apply the KROXYLICIOUS_APP_LOG_LEVEL or KROXYLICIOUS_ROOT_LOG_LEVEL environment variable to the operator’s Kubernetes Deployment:
+
+
+
kubectl set env -n kroxylicious-operator deployment kroxylicious-operator KROXYLICIOUS_APP_LOG_LEVEL=DEBUG
+
+
+
+
Kubernetes recreates the operator pod automatically.
+
+
+
+
Verify that the new logging level has taken effect:
Glossary of terms used in the Kroxylicious documentation.
+
+
+
+
API
+
+
Application Programmer Interface.
+
+
CA
+
+
Certificate Authority. An organization that issues certificates.
+
+
CR
+
+
Custom Resource. An instance resource of a CRD. In other words, a resource of a kind that is not built into Kubernetes.
+
+
CRD
+
+
Custom Resource Definition. A Kubernetes API for defining Kubernetes API extensions.
+
+
mTLS
+
+
Mutual Transport Layer Security. A configuration of TLS where the client presents a certificate to a server, which the server authenticates.
+
+
TLS
+
+
The Transport Layer Security. A secure transport protocol where a server presents a certificate to a client, which the client authenticates. TLS was previously known as the Secure Sockets Layer (SSL).
+
+
TCP
+
+
The Transmission Control Protocol.
+
+
+
+
+
+
+
10. Trademark notice
+
+
+
+
+
Apache Kafka is a registered trademark of The Apache Software Foundation.
+
+
+
Kubernetes is a registered trademark of The Linux Foundation.
+
+
+
Prometheus is a registered trademark of The Linux Foundation.
+
+
+
Strimzi is a trademark of The Linux Foundation.
+
+
+
Hashicorp Vault is a registered trademark of HashiCorp, Inc.
+
+
+
AWS Key Management Service is a trademark of Amazon.com, Inc. or its affiliates.
+
+
+
Microsoft, Azure, and Microsoft Entra are trademarks of the Microsoft group of companies.
+
+
+
Fortanix and Data Security Manager are trademarks of Fortanix, Inc.
+
+
+
+
+
+
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/kroxylicious-operator/index.html b/documentation/0.21.0/html/kroxylicious-operator/index.html
new file mode 100644
index 00000000..f965dd8d
--- /dev/null
+++ b/documentation/0.21.0/html/kroxylicious-operator/index.html
@@ -0,0 +1,11 @@
+---
+layout: guide
+title: Kroxylicious Operator for Kubernetes
+description: Using the Kroxylicious Operator to deploy and run the Proxy in a Kubernetes
+ environment.
+tags:
+ - kubernetes
+rank: '020'
+version: 0.21.0
+permalink: /documentation/0.21.0/html/kroxylicious-operator/
+---
diff --git a/documentation/0.21.0/html/kroxylicious-operator/toc.html b/documentation/0.21.0/html/kroxylicious-operator/toc.html
new file mode 100644
index 00000000..6cff87ab
--- /dev/null
+++ b/documentation/0.21.0/html/kroxylicious-operator/toc.html
@@ -0,0 +1,51 @@
+{% raw %}
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/kroxylicious-proxy/content.html b/documentation/0.21.0/html/kroxylicious-proxy/content.html
new file mode 100644
index 00000000..263e9a43
--- /dev/null
+++ b/documentation/0.21.0/html/kroxylicious-proxy/content.html
@@ -0,0 +1,3535 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers installing, configuring, securing, and operating the Kroxylicious Proxy in a non-Kubernetes environment. Refer to other Kroxylicious guides for information on running the proxy on Kubernetes using the Kroxylicious Operator, or for advanced topics such as plugin development.
+
+
+
+
+
1. Kroxylicious Proxy overview
+
+
+
Kroxylicious is an Apache Kafka protocol-aware ("Layer 7") proxy designed to enhance Kafka-based systems. Through its filter mechanism it allows additional behavior to be introduced into a Kafka-based system without requiring changes to either your applications or the Kafka cluster itself. Built-in filters are provided as part of the solution.
+
+
+
Functioning as an intermediary, Kroxylicious mediates communication between a Kafka cluster and its clients. It takes on the responsibility of receiving, filtering, and forwarding messages.
+
+
+
A Java API provides a convenient means for implementing custom logic within the proxy.
Proxies are a powerful and flexible architectural pattern. For Kafka, they can be used to add functionality to Kafka clusters which is not available out-of-the-box with Apache Kafka. In an ideal world, such functionality would be implemented directly in Apache Kafka. But there are numerous practical reasons that can prevent this, for example:
+
+
+
+
+
Organizations having very niche requirements which are unsuitable for implementation directly in Apache Kafka.
+
+
+
Functionality which requires changes to Kafka’s public API and which the Apache Kafka project is unwilling to implement. This is the case for broker interceptors, for example.
+
+
+
Experimental functionality which might end up being implemented in Apache Kafka eventually. For example using Kroxylicious it’s easier to experiment with alternative transport protocols, such as Quic, or operating system APIs, such as io_uring, because there is already support for this in Netty, the networking framework on which Kroxylicious is built.
+
+
+
+
+
1.1.1. How Kroxylicious works
+
+
First let’s define the concepts in the landscape surrounding Kroxylicious.
+
+
+
+
+
Kafka Client, or Client refers to any client application using a Kafka Client library to talk to a Kafka Cluster.
+
+
+
Kafka Cluster or Cluster refers to a cluster comprising one or more Kafka Brokers.
+
+
+
Downstream refers to the area between Kafka Client and Kroxylicious.
+
+
+
Upstream refers to the area between Kroxylicious and a Kafka Cluster.
+
+
+
+
+
+
+
+
Figure 1. Kroxylicious landscape
+
+
+
Now let’s define some concepts used within Kroxylicious itself.
+
+
+
Virtual cluster
+
+
The Virtual Cluster is the downstream representation of a Kafka Cluster. At the conceptual level, a Kafka Client connects to a Virtual Cluster. Kroxylicious proxies all communications made to the Virtual Cluster through to a (physical) Kafka Cluster, passing it through the Filter Chain.
+
+
+
+
Virtual cluster gateway
+
+
Each virtual cluster has one or more dedicated gateways, which Kafka clients use to establish connections.
+
+
+
Each gateway exposes a bootstrap endpoint, which the Kafka Client must specify in its configuration as the bootstrap.servers property.
+
+
+
In addition to the bootstrap endpoint, the gateway automatically exposes broker endpoints. There is one broker endpoint for each broker of the physical cluster. When the Client connects to a broker endpoint, Kroxylicious proxies all communications to the corresponding broker of the (physical) Kafka Cluster.
+
+
+
Kroxylicious automatically intercepts all the Kafka RPC responses that contain a broker address. It rewrites the address so that it refers to the corresponding broker endpoint of the Virtual Cluster. This means when the Kafka Client goes to connect to, say broker 0, it does so through the Virtual Cluster.
+
+
+
Defining multiple gateways for a virtual cluster is useful when exposing it across different network segments. For example, in Kubernetes, you might configure one gateway for on-cluster traffic and another for off-cluster traffic.
+
+
+
+
Target cluster
+
+
The Target Cluster is the definition of the physical Kafka Cluster within Kroxylicious itself.
+
+
+
A Virtual Cluster has exactly one Target Cluster.
+
+
+
There can be a one-to-one relationship between Virtual Clusters and Target Clusters. The other possibility is many-to-one, where many Virtual Clusters point to the same Target Cluster. The many-to-one pattern is exploited by filters such as multi-tenancy (Kroxylicious Multi-Tenancy guide).
+
+
+
+
+
+
Figure 2. One-to-One relationship between Virtual Cluster and Target Cluster
+
+
+
+
+
+
Figure 3. Many-to-one between Virtual Cluster and Target Cluster
+
+
+
A one-to-many pattern, where one Virtual Cluster points to many Target Clusters (providing amalgamation), is not a supported use-case.
+
+
+
+
Filter chain
+
+
A Filter Chain consists of an ordered list of pluggable protocol filters.
+
+
+
A protocol filter implements some logic for intercepting, inspecting and/or manipulating Kafka protocol messages. Kafka protocol requests (such as Produce requests) pass sequentially through each of the protocol filters in the chain, beginning with the 1st filter in the chain and then following with the subsequent filters, before being forwarded to the broker.
+
+
+
When the broker returns a response (such as a Produce response) the protocol filters in the chain are invoked in the reverse order (that is, beginning with the nth filter in the chain, then the n-1th and so on) with each having the opportunity to inspect and/or manipulate the response. Eventually a response is returned to the client.
+
+
+
The description above describes only the basic capabilities of the protocol filter. Richer features of filters are described later.
+
+
+
+
+
+
Figure 4. Illustration of a request and response being manipulated by filters in a chain
+
+
+
As mentioned above, Kroxylicious takes the responsibility to rewrite the Kafka RPC responses that carry broker address information so that they reflect the broker addresses exposed by the Virtual Cluster. These are the Metadata, DescribeCluster and FindCoordinator responses. This processing is entirely transparent to the work of the protocol filters. Filter authors are free to write their own filters that intercept these responses too.
+
+
+
+
Filter composition
+
+
An important principle for the protocol filter API is that filters should compose nicely. That means that filters generally don’t know what other filters might be present in the chain, and what they might be doing to messages. When a filter forwards a request or response it doesn’t know whether the message is being sent to the next filter in the chain, or straight back to the client.
+
+
+
Such composition is important because it means a proxy user can configure multiple filters (possibly written by several filter authors) and expect to get the combined effect of all of them.
+
+
+
It’s never quite that simple, of course. In practice, they will often need to understand what each filter does in some detail in order to be able to operate their proxy properly, for example by understanding whatever metrics each filter is emitting.
+
+
+
+
+
1.1.2. Implementation
+
+
The proxy is written in Java, on top of Netty. The usual ChannelHandlers provided by the Netty project are used where appropriate (e.g. SSL support uses SslHandler), and Kroxylicious provides Kafka-specific handlers of its own.
+
+
+
The Kafka-aware parts use the Apache Kafka project’s own classes for serialization and deserialization.
+
+
+
Protocol filters get executed using a handler-per-filter model.
+
+
+
+
1.1.3. Deployment topologies
+
+
The proxy supports a range of possible deployment topologies. Which style is used depends on what the proxy is meant to achieve, architecturally speaking. Broadly speaking a proxy instance can be deployed:
+
+
+
+
As a forward proxy
+
+
Proxying the access of one or more clients to a particular cluster/broker that might also be accessible (to other clients) directly.
+
+
Topic-level encryption provides one example use case for a forward proxy-style deployment. This might be applicable when using clients that don’t support interceptors, or if an organization wants to apply the same encryption policy in a single place, securing access to the keys within their network.
+
+
+
As a reverse proxy
+
+
Proxying access for all clients trying to reach a particular cluster/broker.
+
+
Transparent multi-tenancy provides an example use case for a reverse proxy. While Apache Kafka itself has some features that enable multi-tenancy, they rely on topic name prefixing as the primary mechanism for ensuring namespace isolation. Tenants have to adhere to the naming policy and know they’re a tenant of a larger shared cluster.
+
+
+
Transparent multi-tenancy means each tenant has the illusion of having their own cluster, with almost complete freedom over topic and group naming, while still actually sharing a cluster.
+
+
+
+
+
+
We can further classify deployment topologies in how many proxy instances are used. For example:
+
+
+
+
+
Single proxy instance (sidecar)
+
+
+
Proxy pool
+
+
+
+
+
+
+
1.2. Compatibility
+
+
Kroxylicious aims to provide consistent compatibility across its public interfaces, including APIs, configuration syntax, and supported plugin types. Kafka compatibility is maintained in alignment with Apache Kafka’s protocol guarantees.
+
+
+
1.2.1. APIs
+
+
Kroxylicious follows Semantic Versioning rules. While we are still in the initial development phase (denoted by a major version 0), we still take API compatibility very seriously. We aim to provide at least two minor releases between deprecation and the removal of that deprecated item.
+
+
+
We also consider our configuration file syntax a public API (though not the Java model backing it). As such, the syntax follows the same Semantic Versioning and deprecation rules.
+
+
+
Kubernetes custom resources are a public API, and we are making every effort to evolve Kroxylicious custom resources in line with Kubernetes best practices.
+
+
+
Kubernetes resources have their own versioning scheme, which is independent of the Kroxylicious Proxy service version. As a result, Kroxylicious may reach version 1.0.0 while still using alpha or beta versions of the custom resources.
+
+
+
Third-party plugins
+
+
Kroxylicious supports loading third-party plugins to extend the core functionality of the project. While these plugins are configured and loaded as first-class entities within Kroxylicious, we cannot guarantee the compatibility of their APIs or configuration properties.
+
+
+
We do however hold filters and plugins provided by the project to the same standards as the rest of the public API.
+
+
+
+
+
1.2.2. Wire protocol
+
+
Kroxylicious offers the same backwards and forwards compatibility guarantees as Apache Kafka. We support the same range of client and broker versions as the official Apache Kafka Java client.
+
+
+
+
+
+
+
2. Configuring proxies
+
+
+
Fine-tune your deployment by configuring proxies to include additional features according to your specific requirements.
+
+
+
2.1. Outline of a Kroxylicious configuration
+
+
The following example shows the overall outline of a simple Kroxylicious configuration. While not complete (as indicated by # …), it illustrates the essential structure.
filterDefinitions lists filter definitions. Each definition comprises:
+
+
+
+
(Required) A name that identifies the filter. Must be unique within the list.
+
+
+
(Required) A type that specifies the filter implementation class.
+
+
+
config provides filter configuration for non-trivial filters. The required configuration depends on the filter implementation.
+
+
+
+
+
+
defaultFilters defines an ordered list of filters that the traffic sent to the virtual cluster will pass through. You can override this list at the virtual cluster level.
+
+
+
virtualClusters defines the virtual clusters and their cluster-specific configurations.
+
+
+
+
(Required) A name that identifies the virtual cluster. Must be unique within the list.
+
+
+
targetCluster defines the Kafka cluster that the virtual cluster (my-cluster-proxy) proxies.
+
+
+
gateways defines the gateways configuration for this virtual cluster.
+
+
+
subjectBuilder (Optional) defines the transport subject builder configuration.
+
+
+
proxyProtocol (Optional) Enable HAProxy PROXY protocol decoding when deployed behind a PROXY protocol-capable load balancer.
+
+
+
+
+
+
+
+
+
2.2. Defining filters
+
+
Filters in Kroxylicious can be defined globally with filterDefinitions, applied by default using defaultFilters, or customized for specific virtual clusters. The following example shows how these elements work together flexibly:
+
+
+
Example configuration showing global filter definitions applied as defaults and to virtual cluster
The order of definitions in filterDefinitions does not matter.
+
+
+
Each filter definition in filterDefinitions must have a unique name, but you can have multiple definitions with the same type and different configurations (as with encryption and special-encryption in the example).
+
+
+
The order of defaultFilters determines the sequence in which the filters are applied to incoming client requests. In the example, records are first validated and then encrypted.
+
+
+
The defaultFilters are used for all virtual clusters which don’t define their own filters, such as my-proxy-with-default-filters.
+
+
+
The defaultFilters property is optional. It is useful when all virtual clusters must use the same filters. There’s no need to specify it if all virtual clusters have specific filters defined.
+
+
+
When a virtual cluster has defined filters, like my-proxy-with-custom-filters, then those filters are used instead of the defaultFilters.
+
+
+
When using defaultFilters or a virtual cluster’s filters to reference a filter definition, you must define a filter with the corresponding name in filterDefinitions.
+
+
+
+
+
+
2.3. Configuring virtual clusters
+
+
A Kafka cluster is represented by the proxy as a virtual cluster. Clients communicate with the virtual cluster rather than the actual cluster. When Kroxylicious is deployed, it includes configuration to create virtual clusters.
+
+
+
A virtual cluster has exactly one target cluster, but many virtual clusters can target the same cluster. Each virtual cluster targets a single listener on the target cluster, so multiple listeners on the Kafka side are represented as multiple virtual clusters by the proxy. Clients connect to a virtual cluster using a bootstrapServers address. The virtual cluster has a bootstrap address that maps to each broker in the target cluster. When a client connects to the proxy, communication is proxied to the target broker by rewriting the address. Responses back to clients are rewritten to reflect the appropriate network addresses of the virtual clusters.
+
+
+
You can secure virtual cluster connections from clients and to target clusters.
+
+
+
Kroxylicious accepts key material in the following formats:
+
+
+
+
+
PKCS #12 keystore format (Public-Key Cryptography Standards).
+
+
+
Keys and certificates as separate PEM (Privacy Enhanced Mail) files. The key must be in PKCS #8 format.
+
+
+
JKS (Java KeyStore) keystore format.
+
+
+
+
+
+
2.4. Configuring virtual cluster gateways
+
+
Clients connect to a virtual cluster gateway. Each gateway provides a bootstrap address for the initial connection. The gateway also facilitates communication between clients and proxied brokers. This can be implemented in two ways:
+
+
+
+
Port Identifies Node
+
+
The gateway binds separate ports—one for each broker as well as an additional one for the bootstrap address. Clients make connections to the different port numbers to interact with each broker.
+
+
SNI Host Identifies Node
+
+
The gateway assigns a unique hostname to each broker. Clients make connections to these distinct hostnames to interact with the respective brokers. The gateway uses SNI (Server Name Indication) to identify the target broker for the client’s connection.
+
+
+
+
+
+
+
+
+
You must make sure that the gateway’s bootstrap address and generated broker addresses are resolvable and routable by the Kafka Client. You must also make sure firewall rules permit traffic to required port numbers.
+
+
+
+
+
+
2.4.1. Port Identifies Node
+
+
In the Port Identifies Node scheme, the virtual cluster opens a separate port for each proxied broker in addition to a separate port for the bootstrap.
+
+
+
By default, this scheme assumes that the target cluster comprises three nodes with broker ids 0..2. If this is inadequate, additional configuration can be provided describing the broker topology of the target broker.
+
+
+
This scheme can be used with both plain and TLS downstream connections.
+
+
+
This scheme works best with straightforward configurations where the target cluster uses a known minimum broker ID and uses stable sets of broker IDs. For more complex cases, it is recommended to use the SNI Host Identifies Node scheme.
+
+
+
+
+
+
+
When using this scheme, you have the responsibility to avoid port number collision. Ensure that each gateway has its own range of port numbers and these do not overlap with the range used by another gateway, or the gateway of another virtual cluster.
portIdentifiesNode configures the gateway to use port identifies node mode.
+
+
+
+
bootstrapAddress provides the bootstrap address used by Kafka clients.
+
+
+
+
+
+
+
+
With the example configuration above, the gateway exposes a target cluster of up to three brokers with node ids 0, 1, 2. The advertised address is defaulted to that of the bootstrap host name. Port numbers are assigned sequentially beginning at bootstrap port number + 1.
+
+
+
The gateway exposes the following three broker addresses:
+
+
+
+
+
+
+
+
+
Node Id
+
Broker Address
+
+
+
+
+
+
0
+
+
+
localhost:9193
+
+
+
+
+
1
+
+
+
localhost:9194
+
+
+
+
+
2
+
+
+
localhost:9195
+
+
+
+
+
+
Example Port Identifies Node configuration with customized broker address
advertisedBrokerAddressPattern specifies the pattern used to form advertised broker addresses. If not defined, it defaults to the hostname part of the bootstrap address.
+
+
+
brokerStartPort specifies the starting port number for broker assignment. Defaults to the port of the bootstrap address plus 1.
+
+
+
+
+
With the example configuration above, the gateway exposes a target cluster of up to three brokers with node ids 0, 1, 2. The advertised broker address is defined as mycluster.example.com. Port numbers are assigned sequentially beginning at 9200.
+
+
+
The gateway exposes the following three broker addresses:
+
+
+
+
+
+
+
+
+
Node Id
+
Broker Address
+
+
+
+
+
+
0
+
+
+
mycluster.example.com:9200
+
+
+
+
+
1
+
+
+
mycluster.example.com:9201
+
+
+
+
+
2
+
+
+
mycluster.example.com:9202
+
+
+
+
+
+
Example Port Identifies Node configuration with customized node ranges
nodeIdRanges defines one or more node ID ranges. If omitted, defaults to a single range of node IDs 0..2 (inclusive).
+
+
+
+
+
With the example configuration above, the gateway exposes a target cluster of up to six nodes with node ids 1..3 and 101..103. The advertised broker address is defined as mycluster.example.com. Port numbers are assigned sequentially beginning at 9193 (bootstrap port number + 1).
+
+
+
The gateway exposes the following six broker addresses:
+
+
+
+
+
+
+
+
+
Node Id
+
Broker Address
+
+
+
+
+
+
1
+
+
+
mycluster.example.com:9193
+
+
+
+
+
2
+
+
+
mycluster.example.com:9194
+
+
+
+
+
3
+
+
+
mycluster.example.com:9195
+
+
+
+
+
101
+
+
+
mycluster.example.com:9196
+
+
+
+
+
102
+
+
+
mycluster.example.com:9197
+
+
+
+
+
103
+
+
+
mycluster.example.com:9198
+
+
+
+
+
+
The advertisedBrokerAddressPattern configuration parameter accepts the $(nodeId) replacement token, which is optional. If included, $(nodeId) is replaced by the broker’s node.id (or broker.id) in the target cluster.
+
+
+
+
2.4.2. SNI Host Identifies Node
+
+
In the SNI Host Identifies Node scheme, unique broker hostnames are used to know where to route the traffic. As this scheme relies on SNI (Server Name Indication), which is a TLS extension, TLS connections are required. It cannot be used with plain text connections.
+
+
+
In this scheme, you can either share the port across multiple virtual cluster gateways or assign a separate port for each virtual cluster gateway. However, you cannot use a port that is already assigned to a virtual cluster gateway using the Port Identifies Node scheme.
+
+
+
+
+
+
+
When using this scheme, you have the responsibility to make sure that DNS for bootstrap and brokers resolve to an IP address that is routed to the proxy. Wildcard DNS is one way to achieve this.
sniHostIdentifiesNode configures the gateway to use SNI host identifies node.
+
+
+
+
bootstrapAddress is the bootstrap address used by Kafka clients.
+
+
+
advertisedBrokerAddressPattern is the advertised broker address pattern used to form broker addresses. It must include the placeholder $(nodeId) which is substituted for the node ID.
+
+
+
+
+
+
tls provides TLS configuration for the gateway.
+
+
+
+
key provides TLS configuration.
+
+
+
+
+
+
+
+
With the example configuration above, the gateway accepts all traffic on port 9192. Any TLS connections received with the SNI of mycluster.example.com are routed as bootstrap. Any connections received with SNI matching mybroker-$(nodeId).mycluster.example.com are routed to the upstream broker with the same node ID. The configuration exposes a target cluster with any number of brokers. It does not need prior knowledge of the node IDs used by the brokers.
+
+
+
The gateway exposes the following broker addresses:
+
+
+
+
+
+
+
+
+
Node Id
+
Broker Address
+
+
+
+
+
+
0
+
+
+
mybroker-0.mycluster.example.com:9192
+
+
+
+
+
…
+
+
+
…
+
+
+
+
+
n
+
+
+
mybroker-n.mycluster.example.com:9192
+
+
+
+
+
+
Both the advertisedBrokerAddressPattern and bootstrapAddress configuration parameters accept the $(virtualClusterName) replacement token, which is optional. If included, $(virtualClusterName) is replaced with the name of the gateway’s virtual cluster.
+
+
+
Example SNI Host Identifies Node configuration with customized advertised port
advertisedBrokerAddressPattern is the advertised broker address pattern used to form broker addresses, including a customized port number.
+
+
+
+
+
With the example configuration above, Kroxylicious is instructed to listen on port 9192, but advertise brokers of this virtual cluster as being available on port 443. This feature is useful where a network intermediary (such as another proxy or load balancer) is port forwarding.
+
+
+
The gateway exposes the following broker addresses:
+
+
+
+
+
+
+
+
+
Node Id
+
Broker Address
+
+
+
+
+
+
0
+
+
+
mybroker-0.mycluster.example.com:443
+
+
+
+
+
…
+
+
+
…
+
+
+
+
+
n
+
+
+
mybroker-n.mycluster.example.com:443
+
+
+
+
+
+
+
+
+
+
Single port operation may have cost advantages when using load balancers of public clouds, as it allows a single cloud provider load balancer to be shared across all virtual clusters.
+
+
+
+
+
+
+
+
2.5. Securing connections from clients
+
+
To secure client connections to virtual clusters, configure TLS within the virtual cluster gateway by doing the following:
+
+
+
+
+
Obtain a server certificate for the virtual cluster from a Certificate Authority (CA).
+
+ Ensure the certificate matches the names of the virtual cluster gateway’s bootstrap and broker addresses.
+
+ This may require wildcard certificates and Subject Alternative Names (SANs).
+
+
+
Provide the TLS configuration using the tls properties in the virtual cluster gateway’s configuration to enable it to present the certificate to clients. Depending on your certificate format, apply one of the following examples.
+
+
+
For mutual TLS, use the trust properties to configure the virtual cluster gateway to use TLS client authentication.
+
+
+
If required, you can restrict the TLS protocols and cipher suites that are used to form the TLS connection.
+
+
+
+
+
Examples below illustrate how these steps may be done.
+
+
+
+
+
+
+
TLS is recommended for production configurations.
+
+
+
+
+
+
Example applying a PKCS #12 server certificate to a virtual cluster gateway
privateKeyFile refers to a private key of the virtual cluster gateway.
+
+
+
certificateFile refers to a public certificate of the virtual cluster gateway.
+
+
+
keyPassword defines the password used to decrypt the private key. If the private key file is not encrypted, omit the keyPassword field.
+
+
+
+
passwordFile refers to a file containing the password for the key.
+
+
+
+
+
+
+
+
You can configure the virtual cluster gateway to require that clients present a certificate for authentication. The virtual cluster gateway verifies that the client’s certificate is signed by one of the CA certificates contained in a trust store. If verification fails, the client’s connection is refused.
+
+
+
Example applying TLS client authentication using a PKCS #12 truststore
storeFile refers to a PKCS #12 store file containing the private-key and certificate/intermediates of the virtual cluster gateway.
+
+
+
+
storePassword defines the password used to decrypt the store file. Omit the storePassword field if the store is not password protected.
+
+
+
+
passwordFile refers to a file containing the password used to protect the PKCS #12 store.
+
+
+
+
+
+
keyPassword defines the password used to decrypt the key.
+
+
+
+
passwordFile refers to a file containing the password for the key. If a password is not specified, the storePassword is used to decrypt the key too.
+
+
+
+
+
+
+
+
+
storeType identifies the keystore type. If a keystore type is not specified, the default JKS (Java Keystore) type is used.
+
+
+
clientAuth specifies the client authentication mode. If set to REQUIRED, the client must present a valid certificate. If set to REQUESTED, the client is requested to present a certificate. If presented, the certificate is validated. If the client chooses not to present a certificate the connection is still allowed. If set to NONE, client authentication is disabled. If a client authentication mode is not specified, then the default behaviour is REQUIRED.
+
+
+
+
+
+
+
+
+
The client’s identity, as established through TLS client authentication, is currently not relayed to the target cluster. For more information, see the related issue.
+
+
+
+
+
+
You can restrict the TLS protocols by specifying either an allow list of TLS protocols to be enabled, or a deny list of TLS protocols to be disallowed from the platform’s default. If both an allow and a deny list are specified, the resulting list of TLS protocols includes only those protocols from the allow list that are not in the deny list. If neither list is specified, the virtual cluster uses the default TLS protocols provided by the platform.
+
+
+
When the client connects, it negotiates the highest mutually acceptable TLS protocol with the virtual cluster. If the two have no protocols in common, the connection fails.
You can restrict the TLS cipher suite by specifying either an allow list of cipher suites to be enabled, in preference order, or a deny list of ciphers suites to be disallowed from the platform’s default. If both an allow and a deny list are specified, the resulting list of cipher suites includes only those ciphers from the allow list that are not in the deny list. If neither list is specified, the virtual cluster uses the default cipher suites (and preference order) provided by the platform.
+
+
+
When the client connects, it negotiates the most preferred mutually acceptable cipher suite with the virtual cluster. If the two have no cipher suites in common, the connection fails.
The target cluster defines the physical upstream Kafka cluster that is proxied by the my-cluster-proxy virtual cluster.
+
+
+
The target cluster configuration defines three properties:
+
+
+
+
+
bootstrapServers: Comma-separated list of bootstrap addresses for the Kafka cluster being proxied. If the cluster exposes a bootstrap endpoint (as is the case with some cloud-based Kafka services or clusters managed by Strimzi), use that address. If the cluster does not have a specific bootstrap endpoint (as is the case with a standard Kafka deployment) list the address of several broker nodes. This helps spread the bootstrapping load and improves reliability.
+
+
+
bootstrapServerSelection.strategy: The bootstrapping process selects one bootstrap server from the list of configured servers. This property defines the strategy used to select the bootstrap server. The following strategies are supported:
+
+
+
+
round-robin: This strategy selects a server from the configured list of servers in a round-robin manner starting from the first server, looping back after the last server is selected. This is the default strategy used when no strategy is explicitly configured.
+
+
+
random: This strategy selects a random server from the configured list of servers.
The following examples illustrate the strategies that can be used for selecting a bootstrap server from the list of configured addresses.
+
+
+
When multiple bootstrap servers are configured with no bootstrap server selection strategy explicitly configured, the bootstrap server is selected using the round-robin strategy.
strategy specifies the strategy for bootstrap server selection.
+
+
+
+
+
+
2.7. Securing connections to target clusters
+
+
To secure connections from the virtual cluster to the upstream cluster, configure the target cluster’s TLS setting by doing the following:
+
+
+
+
+
If the upstream is using a private CA, use the trust properties to configure a truststore for the target cluster.
+
+
+
If you want to use mutual TLS, specify the certificate with the key property to identify the virtual cluster to the upstream.
+
+
+
If required, you can restrict the TLS protocols and cipher suites that are used to form the TLS connection.
+
+
+
For advanced use cases, configure a TLS credential supplier for dynamic per-connection certificate selection.
+
+
+
+
+
+
+
+
+
TLS is recommended on Kafka clients and virtual clusters for production configurations.
+
+
+
+
+
+
Examples below illustrate how these steps may be done.
+
+
+
2.7.1. Enabling TLS
+
+
Using an empty object ({}) enables TLS using the platform’s defaults. This means that platform trust, and default protocols and cipher suites will be used. This option is suitable if the upstream cluster is using a TLS certificate signed by a public CA and the platform’s defaults are suitable.
+
+
+
Example enabling TLS for a target cluster using platform defaults
If it is using a TLS certificate signed by a private CA, you must add truststore configuration for the target cluster. The example illustrates using PKCS #12 format. PEM format is supported too.
storeFile refers to a PKCS #12 store for the public CA certificate of the Kafka cluster.
+
+
+
+
storePassword defines the password used to decrypt the store file. Omit the storePassword field if the store is not password protected.
+
+
+
+
passwordFile refers to a file containing the password used to protect the PKCS #12 store.
+
+
+
+
+
+
+
+
+
storeType identifies the keystore type. If a keystore type is not specified, the default JKS (Java Keystore) type is used.
+
+
+
+
+
+
2.7.3. Configuring mutual TLS with static credentials
+
+
For mutual TLS, add a keystore configuration for the virtual cluster. The following example uses a PEM format server certificate and key pair. PKCS #12 keystore format is supported too.
For advanced use cases requiring per-client certificate selection, you can configure a TLS credential supplier. A TLS credential supplier is a pluggable component that dynamically provides TLS credentials based on the connection context, allowing different certificates to be used for different clients or connections.
The type of TLS credential supplier (references a ServerTlsCredentialSupplierFactory implementation).
+
+
+
2
+
Optional supplier-specific configuration.
+
+
+
+
+
+
+
+
+
+
The credentialSupplier configuration is optional and follows the same pattern as filter configuration with a type field identifying the supplier implementation and an optional config object for supplier-specific settings.
+
+
+
+
+
+
+
+
+
+
The key and credentialSupplier properties are mutually exclusive. Use key for static credentials, or credentialSupplier for dynamic per-connection credential selection, but not both.
+
+
+
+
+
+
Supplier implementations must be thread-safe, as a single instance is shared across all connections to the virtual cluster. Implementations must not block the calling thread; the tlsCredentials() method returns a CompletionStage for asynchronous credential resolution.
+
+
+
+
2.7.6. Disabling TLS trust checks (testing only)
+
+
For the purposes of testing (that is, outside a production environment), you can set the insecure property to true to disable TLS trust checks (hostname verification and certificate validation) so that Kroxylicious can connect to any Kafka cluster.
Some filters require the verified identity of the client, derived from successful authentication. This identity is called the authenticated subject.
+
+
+
For applications that use TLS client authentication, the virtual cluster can build the authenticated subject from information in the TLS client certificate presented to the proxy.
+
+
+
The transport subject builder controls how certificate information is turned into principals within the authenticated subject. For example, you can build a principal from parts of the X500 principal. A typical use case is mapping a distinguished name such as CN=myapp,OU=dev,O=kroxylicious.io,C=US to myapp.
+
+
+
+
+
+
+
For applications that use SASL authentication, see the SASL Inspection guide for information on building the authenticated subject from a successful client–broker SASL authentication.
+
+
+
+
+
+
2.8.1. Configuration
+
+
The following example shows configuration for the transport subject builder:
<field name> identifies the field to extract. Valid field names are listed in the Supported fields table.
+
+
+
<match replace flags 1>…<match replace flags n> define a set of replacement matchers used to match and transform the field value.
+
+
+
else defines the behavior when none of the replacement matchers match the field value. identity generates a principal based on the original (unmapped) field value. anonymous generates an anonymous principal.
+
+
+
<principal factory> the name of a PrincipalFactory implementation. Currently, only UserFactory is supported.
+
+
+
+
+
Field values are tested against the replacement matchers in the order they are configured. The first matcher that matches the field value generates the principal. After a successful match, remaining matchers for that field are ignored.
+
+
+
If no matcher matches, the outcome is defined by the else clause. If there is no match and no else clause is provided, no principal is generated for this field.
+
+
+
+
+
+
+
Currently, the subject builder is restricted to producing a subject containing at most one user principal.
+
+
+
+
+
+
+
2.8.2. Supported fields
+
+
+
+
+
+
+
+
Field name
+
Description
+
+
+
+
+
+
clientTlsSubject
+
+
+
X500 Principal
+
+
+
+
+
clientTlsSanRfc822Name
+
+
+
Email address from the Subject Alternate Names
+
+
+
+
+
clientTlsSanDirName
+
+
+
DirName from the Subject Alternate Names
+
+
+
+
+
clientTlsSanDnsName
+
+
+
DNSName from the Subject Alternate Names
+
+
+
+
+
clientTlsSanUri
+
+
+
URI from the Subject Alternate Names
+
+
+
+
+
+
+
2.8.3. Replacement matchers
+
+
Replacement matchers control how field values are matched and transformed. A replacement matcher is defined using this format:
<matching regular expression> is the regular expression used to test the field value.
+
+
+
<replacement> defines the replacement value to use when the regular expression is matched. The replacement can use back references ($1 etc) to refer to capturing groups defined within the regular expression.
+
+
+
<flags> (optional) can be either L or U for lower or upper-casing respectively. The case transformation is applied to the replacement value after any back reference resolution.
+
+
+
+
+
+
+
2.9. Configuring per-virtual cluster logging
+
+
You can enable low level logging on a per-virtual cluster basis. The logNetwork property controls logging of information about requests and responses at the network level, before they’ve been decoded into Kafka requests and responses. The logFrames property controls logging of the decoded requests and responses.
bindAddress specifies the address the HTTP server binds to. Defaults to 0.0.0.0.
+
+
+
port specifies the port the HTTP server binds to. Defaults to 9190.
+
+
+
endpoints defines the services exposed by the management interface.
+
+
+
+
prometheus, if present and not null, exposes a Prometheus scrape endpoint at /metrics.
+
+
+
+
+
+
+
+
+
2.11. Configuring network and Netty settings
+
+
The proxy allows configuration of network settings for both the proxy endpoints (client-facing) and management endpoints using the network.proxy and network.management properties. These settings control low-level Netty behavior and are optional, with sensible defaults provided.
proxy defines network settings for the proxy listener that handles client connections.
+
+
+
management defines network settings for the management HTTP server and can be configured independently of the proxy listener.
+
+
+
For both proxy and management:
+
+
+
+
workerThreadCount defines the number of Netty worker threads used to handle concurrent connections. Defaults to twice the number of available processors.
+
+
+
shutdownQuietPeriod defines the grace period during which the proxy continues to accept and complete in-flight requests before shutting down. If no new requests arrive during this window, shutdown proceeds. Defaults to 2s if not specified. Uses Go-style duration format (for example, 30s, 5m).
+
+
+
shutdownTimeout defines the maximum time period allowed for the proxy to complete shutdown before it is forced. Defaults to 15s if not specified. Uses Go-style duration format.
+
+
+
+
+
+
+
+
Supported duration units are: h (hours), m (minutes), s (seconds), ms (milliseconds), μs or us (microseconds), and ns (nanoseconds). Units can be combined, for example 1m30s.
+
+
+
+
+
The workerThreadCount setting allows tuning for high-concurrency deployments. Increasing this value can improve throughput when handling many simultaneous client connections.
+
+
+
The shutdownQuietPeriod and shutdownTimeout settings together control graceful shutdown behaviour. shutdownQuietPeriod should always be less than or equal to shutdownTimeout.
+
+
+
+
+
+
+
+
+
+
+
The shutdownQuietPeriodSeconds property (an integer number of seconds) is deprecated and will be removed in a future release. Use shutdownQuietPeriod with a Go-style duration string instead (for example, shutdownQuietPeriod: 2s).
+
+
+
+
+
+
+
+
+
2.12. Configuring idle connection timeouts
+
+
The proxy can automatically disconnect idle client connections to reclaim resources. Idle timeout configuration is completely optional and disabled by default, allowing you to opt in only when needed for your deployment.
+
+
+
2.12.1. When to enable idle timeouts
+
+
Consider enabling idle timeouts in the following scenarios:
+
+
+
+
+
Security posture: unauthenticated connections can be closed quickly to limit the window for abuse, while authenticated connections get a more generous timeout.
+
+
+
Unclosed connections: clients that abandon connections without properly closing them, leaving resources allocated unnecessarily.
+
+
+
Network infrastructure requirements: in environments where firewalls or load balancers drop idle connections, configure the proxy to disconnect gracefully first.
+
+
+
+
+
+
2.12.2. When not to enable idle timeouts
+
+
Avoid enabling idle timeouts in the following scenarios:
+
+
+
+
+
Legitimate idle connections: applications that maintain long-lived connections with extended idle periods, such as consumers with long poll timeouts or applications using connection pooling.
+
+
+
Stable network infrastructure: environments with reliable network infrastructure and no issues with idle connection management.
+
+
+
+
+
+
2.12.3. How idle timeouts work
+
+
The proxy supports two independent timeout settings that apply at different stages of the connection lifecycle:
+
+
+
+
+
Unauthenticated timeout (unauthenticatedIdleTimeout): applies to connections where the proxy has not yet detected completed authentication. The proxy considers authentication complete if either of the following is true:
+
+
+
+
A transport subject builder (a component that extracts an authenticated identity from transport-layer attributes) creates a subject with an identity (for example, from a client TLS certificate).
+
+
+
A SASL inspection or termination filter invokes io.kroxylicious.proxy.filter.FilterContext.clientSaslAuthenticationSuccess.
+
+
+
+
+
+
Authenticated timeout (authenticatedIdleTimeout): applies to connections where an identity has been established, for the remainder of the connection’s lifetime.
+
+
+
+
+
+
+
+
+
+
+
For the proxy to detect authentication completion, you must configure either TLS client certificate authentication or a SASL inspection or termination filter. Without one of these, all connections remain in the unauthenticated state for their entire lifetime, and authenticatedIdleTimeout has no effect. For more information, see the SASL inspection filter guide.
+
+
+
+
+
+
+
+
Both timeout settings are optional and have no default values. You can configure one, both, or neither depending on your requirements. Timeout values use a string-based duration format, following Go conventions (for example, 30s, 5m). Supported units are: h (hours), m (minutes), s (seconds), ms (milliseconds), μs or us (microseconds), and ns (nanoseconds). Units can be combined, for example 1m30s.
unauthenticatedIdleTimeout specifies how long unauthenticated connections can remain idle before they are disconnected. If omitted, or zero, no idle timeout is applied.
+
+
+
authenticatedIdleTimeout specifies how long authenticated connections can remain idle before they are disconnected. If omitted, or zero, no idle timeout is applied.
+
+
+
+
+
+
2.12.5. Monitoring idle disconnects
+
+
The proxy tracks idle disconnects using the kroxylicious_client_to_proxy_disconnects_total metric with cause="idle_timeout". This counter increments each time a connection is closed after exceeding the configured idle timeout.
+
+
+
The kroxylicious_client_to_proxy_disconnects_total metric also tracks other disconnect scenarios:
+
+
+
+
+
cause="idle_timeout" - Connection exceeded the configured idle timeout duration
+
+
+
cause="client_closed" - The downstream client initiated the connection close
+
+
+
cause="server_closed" - The upstream node closed the connection, causing the proxy to close the client connection
When Kroxylicious is deployed behind a load balancer that uses the HAProxy PROXY protocol, you can configure PROXY protocol handling so that the original client connection metadata (source address, destination address, ports) is preserved.
+
+
+
Both PROXY protocol v1 (text) and v2 (binary, including TLV extensions) are supported.
+
+
+
2.13.1. Proxy protocol modes
+
+
The proxyProtocol.mode property controls how Kroxylicious handles incoming connections:
+
+
+
+
+ required
+
+
+
Every incoming connection must begin with a PROXY protocol header. Connections without a valid header are rejected immediately with a warning log. Use this when Kroxylicious is always behind a PROXY protocol-capable load balancer.
+
+
+ allowed
+
+
+
Kroxylicious inspects the first bytes of each connection and automatically detects whether a PROXY protocol header is present. If detected, the header is decoded and the original client address is extracted. If not, the bytes are passed through to the Kafka protocol decoder. Use this when the same listener may receive both proxied and direct connections.
+
+
+ disabled
+
+
+
No PROXY protocol handling (default). All bytes are treated as Kafka protocol data.
+
+
+
+
+
Configuration fragment with PROXY protocol in required mode
+
+
proxyProtocol:
+ mode: required
+
+
+
+
Configuration fragment with PROXY protocol in allowed mode
+
+
proxyProtocol:
+ mode: allowed
+
+
+
+
+
+
+
+
+
+
When mode is set to required, all incoming connections must include a PROXY protocol header. If a client connects directly without going through a PROXY protocol-capable load balancer, the connection will be rejected and Kroxylicious will log a warning:
+
+
+
Connection rejected — expected PROXY protocol header but received non-PROXY data. Ensure the upstream load balancer is configured to send PROXY protocol headers, or set proxyProtocol mode to 'allowed' or 'disabled'.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
When mode is set to allowed, any client that can connect directly to Kroxylicious (bypassing the load balancer) can send a PROXY protocol header with a spoofed source address. Only use allowed mode when your network topology guarantees that untrusted clients cannot reach the proxy port directly, or when source address spoofing is an acceptable risk. If all connections come through a PROXY protocol-capable load balancer, prefer required mode instead.
+
+
+
+
+
+
+
+
+
+
2.14. Example Kroxylicious configuration
+
+
+
+
Virtual clusters that represent the Kafka clusters
+
+
+
Network addresses for broker communication in a Kafka cluster
+
+
+
Filters to introduce additional functionality to the Kafka deployment
+
+
+
+
+
In this example, configuration for the Record Encryption filter is shown.
filterDefinitions provides a list of named filter configurations.
+
+
+
+
name identifies the filter
+
+
+
type provides the type of filter, which is the Record Encryption filter using Vault as the KMS in this example.
+
+
+
config defines configuration specific to the type of filter.
+
+
+
+
+
+
tls specifies the TLS configuration used to establish a connection to the KMS, including the key names under which TLS certificates are stored.
+
+
+
virtualClusters specifies the virtual cluster definitions.
+
+
+
+
name defines the name of the virtual cluster.
+
+
+
targetCluster defines the physical upstream cluster.
+
+
+
+
bootstrapServers defines the bootstrap addresses of the Kafka Cluster being proxied.
+
+
+
strategy defines the strategy used for selecting an address from bootstrapServers when the proxy receives a client connection. Options are round-robin (default) or random.
+
+
+
tls provides TLS configuration for the connection to the target cluster.
+
+
+
+
+
+
+
+
+
gateways defines how the virtual cluster is exposed to the network.
+
+
+
+
The gateway routing scheme is configured using either sniHostIdentifiesNode or portIdentifiesNode.
+
+
+
bootstrapAddress defines the hostname and port of the bootstrap used by the Kafka clients to connect to the gateway. The hostname must be resolved by the clients.
+
+
+
tls defines the TLS configuration used by the gateway for securing connections with the clients.
+
+
+
+
+
+
+
+
+
+
+
3. Built-in filters
+
+
+
Kroxylicious comes with a suite of built-in filters designed to enhance the functionality and security of your Kafka clusters.
+
+
+
3.1. Authorization filter
+
+
The Kroxylicious Authorization filter enables the proxy to enforce authorization rules on client requests before they reach the Kafka brokers. For information on using the filter, see the Authorization guide.
+
+
+
+
3.2. Record Encryption filter
+
+
The Kroxylicious Record Encryption filter enables encryption-at-rest for Apache Kafka clusters. For information on using the filter, see the Record Encryption guide.
+
+
+
+
3.3. Record Validation filter
+
+
The Kroxylicious Record Validation filter validates records sent by Kafka client to Kafka brokers. For information on using the filter, see the Record Validation guide.
+
+
+
+
3.4. Multi-tenancy filter
+
+
The Kroxylicious multi-tenancy filter presents a single Kafka cluster to tenants as if it were multiple clusters. For information on using the filter, see the Kroxylicious Multi-Tenancy guide.
+
+
+
+
3.5. Oauth Bearer Validation filter
+
+
The Kroxylicious Oauth Bearer Validation filter validates the JWT token received from a client before forwarding it to the cluster. For information on using the filter, see the OAuth Bearer Validation guide.
+
+
+
+
3.6. SASL Inspection filter
+
+
The Kroxylicious SASL Inspection filter extracts the authenticated principal from a successful SASL exchange between Kafka Client and Kafka Broker and makes it available to the other filters in the chain. For information on using the filter, see the SASL Inspection guide.
+
+
+
+
3.7. Connection Expiration filter
+
+
The Kroxylicious Connection Expiration filter closes client connections after a configurable expiration age. This is useful in dynamic environments like Kubernetes where proxy instances scale up and down, helping to rebalance connections across proxy instances. For information on using the filter, see the Connection Expiration guide.
+
+
+
+
+
+
4. Community filters
+
+
+
Community contributed filters are showcased in the Community Gallery.
+
+
+
+
+
+
+
These filters are contributed by the community and are not managed or maintained by the Kroxylicious team. Use them at your own risk.
+
+
+
+
+
+
+
+
5. Monitoring proxies
+
+
+
Kroxylicious supports key observability features to help you understand the performance and health of your proxy instances.
+
+
+
The Kroxylicious Proxy supports real-time monitoring and alerting by emitting system metrics. You can configure metric emission within the proxy and integrate it with a monitoring system like Prometheus to ingest and analyze the data.
+
+
+
The Kroxylicious Proxy writes a log so that its actions may be understood over time. You can adjust log levels and customize logging as described in this section.
+
+
+
5.1. Introducing metrics
+
+
If you want to introduce metrics to your Kroxylicious deployment, you can configure an insecure HTTP and Prometheus endpoint (at /metrics).
+
+
+
Add the following to the ConfigMap resource that defines the Kroxylicious configuration:
+
+
+
Minimal metrics configuration
+
+
management:
+ endpoints:
+ prometheus: {}
+
+
+
+
By default, the HTTP endpoint listens on 0.0.0.0:9190. You can change the bind address and port as follows:
+
+
+
Example metrics configuration with bind address and port
The proxy provides metrics for both connections and messages. These metrics are categorized into downstream (client-side) and upstream (broker-side) groups. They allow users to assess the impact of the proxy and its filters on their Kafka system.
+
+
+
+
+
Connection metrics count the connections made from the downstream (incoming connections from the clients) and the connection made by the proxy to upstream (outgoing connections to the Kafka brokers).
Connection metrics count the TCP connections made from the client to the proxy (kroxylicious_client_to_proxy_connections_total) and from the proxy to the broker (kroxylicious_proxy_to_server_connections_total). These metrics count connection attempts, so the connection count is incremented even if the connection attempt ultimately fails.
+
+
+
In addition to the count metrics, there are active connection gauge metrics that track the current number of open connections, and error metrics.
+
+
+
+
+
If an error occurs whilst the proxy is accepting a connection from the client the kroxylicious_client_to_proxy_errors_total metric is incremented by one.
+
+
+
If an error occurs whilst the proxy is attempting a connection to a broker the kroxylicious_proxy_to_server_errors_total metric is incremented by one.
+
+
+
+
+
Connection and connection error metrics include the following labels: virtual_cluster (the virtual cluster’s name) and node_id (the broker’s node ID). When the client connects to the bootstrap endpoint of the virtual cluster, a node ID value of bootstrap is recorded.
+
+
+
The kroxylicious_client_to_proxy_errors_total metric also counts connection errors that occur before a virtual cluster has been identified. For these specific errors, the virtual_cluster and node_id labels are set to an empty string ("").
+
+
+
+
+
+
+
Error conditions signaled within the Kafka protocol response (such as RESOURCE_NOT_FOUND or UNKNOWN_TOPIC_ID) are not classed as errors by these metrics.
+
+
+
+
+
+
Understanding connection metrics relationships
+
+
The proxy provides several related metrics for tracking connections:
+
+
+
+
+
Connection counters (kroxylicious_*_connections_total) track the total number of connection attempts over time. These values only increase and provide a historical view of connection activity.
+
+
+
Active connection gauges (kroxylicious_*_active_connections) show the current number of open connections at any given moment. These values increase when connections are established and decrease when connections are closed.
+
+
+
Error counters (kroxylicious_*_errors_total) track connections that closed due to errors.
+
+
+
Disconnect counters (kroxylicious_client_to_proxy_disconnects_total) track connections that closed without errors, categorized by cause.
+
+
+
+
+
When a connection closes, it increments either the error counter or one of the disconnect counter causes, but never both. The active connection gauge decreases regardless of whether the closure was due to an error or a clean disconnect.
+
+
+
The following relationship holds:
+
+
+
Active connections = Connections total - (Errors total + sum of all Disconnect causes)
+
+
+
Table 1. Connection metrics for client and broker interactions
+
+
+
+
+
+
+
+
+
Metric Name
+
Type
+
Labels
+
Description
+
+
+
+
+
+
kroxylicious_client_to_proxy_connections_total
+
+
+
Counter
+
+
+
virtual_cluster, node_id
+
+
+
Incremented by one every time a connection is accepted from a client by the proxy.
+
+ This metric counts all connection attempts that reach the proxy, even those that end in error.
+
+
+
+
+
kroxylicious_client_to_proxy_errors_total
+
+
+
Counter
+
+
+
virtual_cluster, node_id
+
+
+
Incremented by one every time a connection is closed due to any downstream error.
+
+
+
+
+
kroxylicious_proxy_to_server_connections_total
+
+
+
Counter
+
+
+
virtual_cluster, node_id
+
+
+
Incremented by one every time a connection is made to the server from the proxy.
+
+ This metric counts all connections attempted to the broker, even those that end in error.
+
+
+
+
+
kroxylicious_proxy_to_server_errors_total
+
+
+
Counter
+
+
+
virtual_cluster, node_id
+
+
+
Incremented by one every time a connection is closed due to any upstream error.
+
+
+
+
+
kroxylicious_client_to_proxy_active_connections
+
+
+
Gauge
+
+
+
virtual_cluster, node_id
+
+
+
Shows the current number of active TCP connections from clients to the proxy.
+
+ This gauge reflects real-time connection state and decreases when connections are closed.
+
+
+
+
+
kroxylicious_proxy_to_server_active_connections
+
+
+
Gauge
+
+
+
virtual_cluster, node_id
+
+
+
Shows the current number of active TCP connections from the proxy to servers.
+
+ This gauge reflects real-time connection state and decreases when connections are closed.
+
+
+
+
+
kroxylicious_client_to_proxy_disconnects_total
+
+
+
Counter
+
+
+
virtual_cluster, node_id, cause
+
+
+
Incremented by one every time a client connection is closed by the proxy. The cause label indicates the reason for disconnection:
+
+ idle_timeout - Connection exceeded the configured idle timeout duration (requires idle timeouts configured via network.proxy.unauthenticatedIdleTimeout or network.proxy.authenticatedIdleTimeout).
+
+ client_closed - Client initiated the connection close.
+
+ server_closed - Backend server closed the connection, causing the proxy to close the client connection.
+
+ Note: Error-based disconnects are tracked separately via kroxylicious_client_to_proxy_errors_total, not this metric.
+
+
+
+
+
+
+
+
5.2.2. Message metrics
+
+
Message metrics count and record the sizes of the Kafka protocol requests and responses that flow through the proxy.
+
+
+
Use these metrics to help understand:
+
+
+
+
+
the number of messages flowing through the proxy.
+
+
+
the overall volume of data through the proxy.
+
+
+
the effect the filters are having on the messages.
+
+
+
Downstream metrics
+
+
+
+
kroxylicious_client_to_proxy_request_total counts requests as they arrive from the client.
+
+
+
kroxylicious_proxy_to_client_response_total counts responses as they are returned to the client.
+
+
+
kroxylicious_client_to_proxy_request_size_bytes is incremented by the size of each request as it arrives from the client.
+
+
+
kroxylicious_proxy_to_client_response_size_bytes is incremented by the size of each response as it is returned to the client.
+
+
+
+
+
+
Upstream metrics
+
+
+
+
kroxylicious_proxy_to_server_request_total counts requests as they go to the broker.
+
+
+
kroxylicious_server_to_proxy_response_total counts responses as they are returned by the broker.
+
+
+
kroxylicious_proxy_to_server_request_size_bytes is incremented by the size of each request as it goes to the broker.
+
+
+
kroxylicious_server_to_proxy_response_size_bytes is incremented by the size of each response as it is returned by the broker.
+
+
+
+
+
+
+
+
The size recorded is the encoded size of the protocol message. It includes the 4 byte message size.
+
+
+
Filters can alter the flow of messages through the proxy or the content of the message. This is apparent through the metrics.
+
+
+
+
+
If a filter sends a short-circuit, or closes a connection, the downstream message counters will exceed the upstream counters.
+
+
+
If a filter changes the size of the message, the downstream size metrics will be different to the upstream size metrics.
+
+
+
+
+
+
+
+
Figure 5. Downstream and upstream message metrics in the proxy
+
+
+
Message metrics include the following labels: virtual_cluster (the virtual cluster’s name), node_id (the broker’s node ID), api_key (the message type), api_version, and decoded (a flag indicating if the message was decoded by the proxy).
+
+
+
When the client connects to the bootstrap endpoint of the virtual cluster, metrics are recorded with a node ID value of bootstrap.
+
+
+
Table 2. Kafka message metrics for proxy request and response flow
targets specifies the host that is running the Kroxylicious instance and the port number assigned to management.
+
+
+
+
+
+
5.4. Integrating Micrometer
+
+
Kroxylicious integrates with Micrometer for gathering metrics.
+
+
+
Micrometer provides a simple facade over instrumentation clients for popular observability systems, allowing you to instrument your JVM-based application code without vendor lock-in. The following example shows how to define the CommonTagsHook and StandardBindersHook types to add a label to metrics and register a JVM metrics binder.
+
+
+
Example proxy configuration for Micrometer integration
Micrometer uses the concept of meter binders to register metrics that provide information about the state of some aspect of the application or its container. By registering standard binders included with Micrometer, you can expose metrics about the JVM and system, such as JVM memory usage and garbage collection.
Micrometer offers a pause detector (using the LatencyUtils package) which attempts to compensate for requests which would have been accepted and delayed by a pause arising from a source external to the monitored section (there are a wide variety of possible sources, such as garbage collectors or system pauses). Micrometer defaults to a no-op pause detector implementation, but it also provides a clock drift based implementation which can be optionally configured. The clock drift based pause detector can be configured by providing a sleep interval and a pause threshold. These values are defaulted to the Micrometer recommended 100ms but you can choose to configure different values based on your environment. Using the clock drift based pause detector can result in samples being added to timers in unexpected ways in some degenerate cases, so users are advised to use it after careful consideration.
Use the static methods of Micrometer Metrics to register metrics with the global registry.
+
+
+
Alternatively, use Metrics.globalRegistry to get a reference to the global registry. Metrics registered this way are automatically available through the Prometheus scrape endpoint.
When you use the bin/kroxylicious-start.sh script from the binary distribution, you can configure logging behavior using the following environment variables.
+
+
+
Table 4. Logging environment variables
+
+
+
+
+
+
+
+
+
Environment variable
+
Default
+
Valid values
+
Description
+
+
+
+
+
+
KROXYLICIOUS_ROOT_LOG_LEVEL
+
+
+
WARN
+
+
+
TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
+
+
+
Log level for all loggers that are not otherwise configured.
+
+
+
+
+
KROXYLICIOUS_APP_LOG_LEVEL
+
+
+
INFO
+
+
+
TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
+
+
+
Log level for Kroxylicious loggers (io.kroxylicious.*). This setting takes precedence over KROXYLICIOUS_ROOT_LOG_LEVEL for application code.
+
+
+
+
+
KROXYLICIOUS_LOG_FORMAT
+
+
+
text
+
+
+
text, json
+
+
+
Output format for log messages. Set to json to enable structured JSON output.
JSON layout template used when KROXYLICIOUS_LOG_FORMAT=json. For example, use classpath:EcsLayout.json for Elastic Common Schema or classpath:GelfLayout.json for GELF format.
+
+
+
+
+
KROXYLICIOUS_LOGGING_OPTIONS
+
+
+
(built-in defaults)
+
+
+
Any valid log4j2 JVM system properties.
+
+
+
Overrides the entire set of log4j2 JVM system properties. When set, the default -Dlog4j2.configurationFile and -Dlog4j2.contextSelector options are replaced. You must include all required options explicitly.
+
+
+
+
+
+
The following examples show how to set the application log level and configure a custom Log4j2 configuration.
+
+
+
Setting the application log level to DEBUG
+
+
KROXYLICIOUS_APP_LOG_LEVEL="DEBUG"
+
+
+
+
+
+
+
+
Setting the log level to DEBUG or TRACE produces very verbose logs.
Setting KROXYLICIOUS_LOGGING_OPTIONS replaces the built-in defaults. Always include -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector to preserve asynchronous logging.
+
+
+
+
+
+
5.5.1. Switching to JSON log format
+
+
By default, Kroxylicious outputs logs in human-readable text format. You can switch to structured JSON output to integrate with log aggregation systems such as the Elastic Stack (ELK) or Splunk.
+
+
+
The JSON format uses Log4j2’s JsonTemplateLayout with the Logstash JSON Event V1 format. Structured context fields emitted by Kroxylicious appear as top-level fields alongside standard log metadata.
+
+
+
Procedure
+
+
+
Set the KROXYLICIOUS_LOG_FORMAT environment variable before starting the proxy:
When an exception is logged, a stack_trace field is included in the output.
+
+
+
+
To switch back to text format, remove the environment variable or set it to text:
+
+
+
+
+
+
unset KROXYLICIOUS_LOG_FORMAT
+# or
+export KROXYLICIOUS_LOG_FORMAT=text
+
+
+
+
+
+
5.6. Structured log fields
+
+
Kroxylicious uses structured logging, attaching context as key-value pairs that are visible in both text and JSON output. In text format, they appear at the end of each log line. In JSON format, they appear as top-level fields.
+
+
+
The following table lists commonly emitted fields.
+
+
+
Table 5. Structured log fields
+
+
+
+
+
+
+
Field
+
Description
+
+
+
+
+
+
sessionId
+
+
+
Unique identifier for the client-to-proxy connection. Use this field to correlate all log messages for a single client session.
+
+
+
+
+
clientId
+
+
+
The Kafka client.id value supplied by the connecting Kafka client.
+
+
+
+
+
apiKey
+
+
+
The Kafka API key that identifies the request type (for example, Produce, Fetch, Metadata).
+
+
+
+
+
apiVersion
+
+
+
The Kafka API version used in the request.
+
+
+
+
+
kekRef
+
+
+
Reference to the Key Encryption Key (KEK) used by the Record Encryption filter. This field is emitted only when the Record Encryption filter is configured with the HashiCorp Vault KMS provider.
+
+
+
+
+
error
+
+
+
Error message associated with a failure condition. This field is emitted across the proxy runtime, filters, and KMS providers when an error is encountered.
+
+
+
+
+
+
+
+
+
6. Glossary
+
+
+
Glossary of terms used in the Kroxylicious documentation.
+
+
+
+
API
+
+
Application Programmer Interface.
+
+
CA
+
+
Certificate Authority. An organization that issues certificates.
+
+
mTLS
+
+
Mutual Transport Layer Security. A configuration of TLS where the client presents a certificate to a server, which the server authenticates.
+
+
TLS
+
+
The Transport Layer Security. A secure transport protocol where a server presents a certificate to a client, which the client authenticates. TLS was previously known as the Secure Sockets Layer (SSL).
+
+
TCP
+
+
The Transmission Control Protocol.
+
+
+
+
+
+
+
7. Trademark notice
+
+
+
+
+
Apache Kafka is a registered trademark of The Apache Software Foundation.
+
+
+
Kubernetes is a registered trademark of The Linux Foundation.
+
+
+
Prometheus is a registered trademark of The Linux Foundation.
+
+
+
Strimzi is a trademark of The Linux Foundation.
+
+
+
HashiCorp Vault is a registered trademark of HashiCorp, Inc.
+
+
+
AWS Key Management Service is a trademark of Amazon.com, Inc. or its affiliates.
+
+
+
Microsoft, Azure, and Microsoft Entra are trademarks of the Microsoft group of companies.
+
+
+
Fortanix and Data Security Manager are trademarks of Fortanix, Inc.
+
+
+
+
+
+
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/kroxylicious-proxy/index.html b/documentation/0.21.0/html/kroxylicious-proxy/index.html
new file mode 100644
index 00000000..eb62f11f
--- /dev/null
+++ b/documentation/0.21.0/html/kroxylicious-proxy/index.html
@@ -0,0 +1,11 @@
+---
+layout: guide
+title: Proxy Guide
+description: "Using the Proxy, including configuration, security and operation."
+tags:
+ - proxy
+ - security
+rank: '010'
+version: 0.21.0
+permalink: /documentation/0.21.0/html/kroxylicious-proxy/
+---
diff --git a/documentation/0.21.0/html/kroxylicious-proxy/toc.html b/documentation/0.21.0/html/kroxylicious-proxy/toc.html
new file mode 100644
index 00000000..f8b13d83
--- /dev/null
+++ b/documentation/0.21.0/html/kroxylicious-proxy/toc.html
@@ -0,0 +1,50 @@
+{% raw %}
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/multi-tenancy-guide/content.html b/documentation/0.21.0/html/multi-tenancy-guide/content.html
new file mode 100644
index 00000000..b4700ba3
--- /dev/null
+++ b/documentation/0.21.0/html/multi-tenancy-guide/content.html
@@ -0,0 +1,222 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers using the Kroxylicious Multi-tenancy Filter to present a single Kafka cluster as if it were multiple clusters. Refer to other Kroxylicious guides for information on running the proxy or for advanced topics such as plugin development.
+
+
+
Kroxylicious’s Multi-tenancy filter presents a single Kafka cluster to tenants as if it were multiple clusters. Operations are isolated to a single tenant by prefixing resources with an identifier.
+
+
+
+
+
+
+
This filter is currently in incubation and available as a preview. We would not recommend using it in a production environment.
+
+
+
+
+
+
The Multi-tenancy filter works by intercepting all Kafka RPCs (remote procedure calls) that reference resources, such as topic names and consumer group names:
+
+
+
+
Request path
+
+
On the request path, resource names are prefixed with a tenant identifier.
+
+
Response path
+
+
On the response path, the prefix is removed.
+
+
+
+
+
Kafka RPCs that list resources are filtered so that only resources belonging to the tenant are returned, effectively creating a private cluster experience for each tenant.
+
+
+
To set up the filter, configure it in Kroxylicious.
+
+
+
+
+
+
+
While the Multi-tenancy filter isolates operations on resources, it does not isolate user identities across tenants. User authentication and ACLs (Access Control Lists) are shared across all tenants, meaning that identity is not scoped to individual tenants. For more information on open issues related to this filter, see Kroxylicious issues.
+
+
+
+
+
+
+
+
+
+
For more information on Kafka’s support for multi-tenancy, see the Apache Kafka website.
+
+
+
+
+
+
+
+
1. (Preview) Setting up the Multi-tenancy filter
+
+
+
This procedure describes how to set up the Multi-tenancy filter by configuring it in Kroxylicious. The filter dynamically prefixes resource names to create isolation between tenants using the same Kafka cluster. The prefix representing a tenant is taken from the name of the virtual cluster representing the tenant. For example, if the virtual cluster is named tenant-1, the prefix is tenant-1. Each tenant must be represented by a unique virtual cluster, and virtual cluster names must be globally unique within the Kroxylicious configuration. This means that the same virtual cluster name cannot be used to represent different Kafka clusters.
Verify that multi-tenancy filtering has been applied.
+
+
For example, create a topic through each virtual cluster and check that the topics are prefixed with the name of the corresponding virtual cluster.
+
+
+
+
+
+
1.1. Example proxy configuration file
+
+
If your instance of the Kroxylicious Proxy runs directly on an operating system, provide the filter configuration in the filterDefinitions list of your proxy configuration.
+
+
+
Here’s a complete example of a filterDefinitions entry configured for multi-tenancy:
+{% endraw %}
diff --git a/documentation/0.21.0/html/oauth-bearer-validation/content.html b/documentation/0.21.0/html/oauth-bearer-validation/content.html
new file mode 100644
index 00000000..b62981ec
--- /dev/null
+++ b/documentation/0.21.0/html/oauth-bearer-validation/content.html
@@ -0,0 +1,246 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers using the Kroxylicious Oauth Bearer Validation Filter. This filter validates the JWT token received from client before forwarding it to the cluster. Refer to other Kroxylicious guides for information on running the proxy or for advanced topics such as plugin development.
+
+
+
The OauthBearerValidation filter enables validation of the JWT token received from the client before forwarding it to the cluster.
+
+
+
If the token is not validated, then the request is short-circuited. It reduces resource consumption on the cluster when a client sends too many invalid SASL requests.
+
+
+
+
+
+
Figure 1. Sequence diagram showing the filter validating the oauth token before it reaches the broker.
+
+
+
+
+
1. Configuring the Oauth Bearer Validation filter
+
+
+
This procedure describes how to set up the Oauth Bearer Validation filter by configuring it in Kroxylicious.
If your instance of the Kroxylicious Proxy runs directly on an operating system, provide the filter configuration in the filterDefinitions list of your proxy configuration.
+
+
+
Here’s a complete example of a filterDefinitions entry configured for Oauth Bearer validation:
jwksEndpointUrl (Required) specifies the OAuth/OIDC provider endpoint from which the JSON Web Key Set (JWKS) is retrieved. The following properties are optional:
+
+
+
jwksEndpointRefreshMs specifies the interval, in milliseconds, between refreshes of the JWKS cache used to verify JWT signatures.
+
+
+
jwksEndpointRetryBackoffMs specifies the initial delay, in milliseconds, between attempts to retrieve the JWKS from the external authentication provider.
+
+
+
jwksEndpointRetryBackoffMaxMs specifies the maximum delay, in milliseconds, between JWKS retrieval attempts.
+
+
+
scopeClaimName specifies an alternative claim name for the scope in the JWT payload.
+
+
+
subClaimName specifies an alternative claim name for the subject in the JWT payload.
+
+
+
authenticateBackOffMaxMs specifies the maximum backoff time, in milliseconds, applied to repeated authentication attempts. A value of 0 disables backoff. Otherwise, an exponential delay is added to each authenticate request until the authenticateBackOffMaxMs has been reached.
+
+
+
authenticateCacheMaxSize specifies the maximum number of failed tokens retained in the cache.
+
+
+
expectedAudience specifies a comma-delimited list of valid audiences used to verify the JWT.
+
+
+
expectedIssuer specifies the expected issuer used to verify the JWT.
jwksEndpointUrl (Required) specifies the OAuth/OIDC provider URL from which the provider’s JSON Web Key Set (JWKS) is retrieved. The following properties are optional:
+
+
+
jwksEndpointRefreshMs specifies the interval, in milliseconds, between refreshes of the JWKS cache used to verify JWT signatures.
+
+
+
jwksEndpointRetryBackoffMs specifies the initial delay, in milliseconds, between attempts to retrieve the JWKS from the external authentication provider.
+
+
+
jwksEndpointRetryBackoffMaxMs specifies the maximum delay, in milliseconds, between JWKS retrieval attempts.
+
+
+
scopeClaimName specifies an alternative claim name for the scope in the JWT payload.
+
+
+
subClaimName specifies an alternative claim name for the subject in the JWT payload.
+
+
+
authenticateBackOffMaxMs specifies the maximum backoff time, in milliseconds, applied to repeated authentication attempts. A value of 0 disables backoff. Otherwise, an exponential delay is added to each authenticate request until the authenticateBackOffMaxMs has been reached.
+
+
+
authenticateCacheMaxSize specifies the maximum number of failed tokens retained in the cache.
+
+
+
expectedAudience specifies a comma-delimited list of valid audiences used to verify the JWT.
+
+
+
expectedIssuer specifies the expected issuer used to verify the JWT.
+{% endraw %}
diff --git a/documentation/0.21.0/html/proxy-quick-start/content.html b/documentation/0.21.0/html/proxy-quick-start/content.html
new file mode 100644
index 00000000..edce3b32
--- /dev/null
+++ b/documentation/0.21.0/html/proxy-quick-start/content.html
@@ -0,0 +1,189 @@
+{% raw %}
+
+
+
+
+
+
+
Get Kroxylicious, the snappy open source proxy for Apache Kafka®, up and running in minutes.
+
+
+
+
+
1. Prerequisites
+
+
+
Before you begin, ensure you have the following installed and configured.
+
+
+
+
+
Java 21+
+
+
Kroxylicious requires a Java Runtime Environment (JRE) version 21 or higher (JRE 17 remains supported for now but is deprecated). To check your version, run:
+
+
+
+
java -version
+
+
+
+
+
+
+
+
If this command fails, you may need to install a JRE or add it to your system’s PATH variable.
+
+
+
+
+
+
+
An Apache Kafka® Cluster
+
+
Kroxylicious needs a running Kafka cluster to proxy.
+
+
+
If you don’t have one already, and you have a container engine like docker or podman installed, then the easiest way to deploy a Kafka service is in a container like:
+
+
+
+
podman pull apache/kafka:4.2.0
+podman run -p 9092:9092 apache/kafka:4.2.0
+
+
+
+
Otherwise, the official Apache Kafka® quick start provides further instructions for setting up a local cluster using the Kafka binary distribution.
+
+
+
+
Kafka Binary Distribution
+
+
To interact with your Kafka cluster and test the proxy, you will need the Kafka command-line tools. Obtain the binary distribution from the official Apache Kafka website: Apache Kafka Downloads.
Extract the archive into your desired installation directory.
+
+
+
tar -zxf kroxylicious-app-0.21.0-bin.tar.gz
+
+
+
+
+
+
+
+
+
+
+
For Windows, you might find the .zip format easier to work with.
+
+
+
+
+
+
+
+
3. Configure the Proxy
+
+
+
Kroxylicious uses a YAML file for configuration. You can define virtual clusters, specify target Kafka clusters, and enable filters.
+
+
+
For this quick start, we’ll use the example configuration file located at config/example-proxy-config.yaml. This file is pre-configured for a local Kafka cluster running on default ports.
+
+
+
+
+
+
+
If your Kafka cluster uses custom ports or runs on a different machine, you’ll need to adjust the settings in the YAML file. See the Kroxylicious Proxy guide for more advanced configuration options.
+
+
+
+
+
+
+
+
4. Start the Proxy
+
+
+
From your Kroxylicious installation directory, run the start script and point it to your configuration file.
+
+
+
+
cd kroxylicious-app-0.21.0
+
+./bin/kroxylicious-start.sh --config config/example-proxy-config.yaml
+
+
+
+
To use a custom configuration, simply replace the file path after the --config flag.
+
+
+
+
+
5. Configure Kafka clients to connect to the proxy
+
+
+
Finally, point your Kafka clients to the proxy’s bootstrap address and send it some requests.
+
+
+
5.1. Create a Topic
+
+
Use the kafka-topics.sh client to create a topic named my_topic through the proxy.
+{% endraw %}
diff --git a/documentation/0.21.0/html/record-encryption-guide/content.html b/documentation/0.21.0/html/record-encryption-guide/content.html
new file mode 100644
index 00000000..0201e4ce
--- /dev/null
+++ b/documentation/0.21.0/html/record-encryption-guide/content.html
@@ -0,0 +1,3063 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers using the Kroxylicious Record Encryption Filter to provide encryption-at-rest for Apache Kafka. Refer to other Kroxylicious guides for information on running the proxy or for advanced topics such as plugin development.
+
+
+
The Kroxylicious Record Encryption filter enhances the security of Kafka messages. The filter uses industry-standard cryptographic techniques to apply encryption to Kafka messages, ensuring the confidentiality of data stored in the Kafka Cluster. By centralizing topic-level encryption, Kroxylicious provides streamlined protection across Kafka clusters.
+
+
+
To use the filter, follow these steps:
+
+
+
+
+
Set up a Key Management System (KMS)
+
+
+
Establish encryption keys within the KMS for securing the topics
+
+
+
Configure the filter within Kroxylicious
+
+
+
+
+
The filter integrates with a Key Management Service (KMS), which is responsible for the safe storage of sensitive key material. Kroxylicious supports the following KMS providers:
+
+
+
+
+
HashiCorp Vault (On-premise, HashiCorp Cloud Platform Vault and Enterprise are supported. Free tier HCP Vault Secrets is not).
+
+
+
AWS Key Management Service.
+
+
+
Azure Key Vault
+
+
+
Fortanix DSM
+
+
+
+
+
You can provide implementations for your specific KMS systems. Additional KMS support may be added based on demand.
+
+
+
+
+
1. How encryption works
+
+
+
The Record Encryption filter uses envelope encryption to encrypt records with symmetric encryption keys. The filter encrypts records from produce requests and decrypts records from fetch responses.
+
+
+
+
Envelope encryption
+
+
Envelope encryption is an industry-standard technique suited for encrypting large volumes of data in an efficient manner. Data is encrypted with a Data Encryption Key (DEK). The DEK is encrypted using a Key Encryption Key (KEK). The KEK is stored securely in a Key Management System (KMS).
+
+
Symmetric encryption keys
+
+
AES(GCM) 256-bit symmetric encryption keys are used to encrypt and decrypt record data.
+
+
+
+
+
+
+
+
+
If you are using Azure Key Vault and Managed HSM is not available, you can use RSA-OAEP-256 encryption, using a 2048-bit (or greater) asymmetric key instead of 256-bit AES-GCM symmetric keys. This approach is not quantum-resistant.
+
+
+
+
+
+
The process is as follows:
+
+
+
+
+
The filter intercepts produce requests from producing applications and transforms them by encrypting the records.
+
+
+
The produce request is forwarded to the broker.
+
+
+
The filter intercepts fetch responses from the broker and transforms them by decrypting the records.
+
+
+
The fetch response is forwarded to the consuming application.
+
+
+
+
+
The filter encrypts the record value only. Record keys, headers, and timestamps are not encrypted.
+
+
+
The entire process is transparent from the point of view of Kafka clients and Kafka brokers. Neither are aware that the records are being encrypted, nor do they have any access to the encryption keys or have any influence on the ciphering process to secure the records.
+
+
+
1.1. How the filter encrypts records
+
+
The filter encrypts records from produce requests as follows:
+
+
+
+
+
Filter selects a KEK to apply.
+
+
+
Requests the KMS to generate a DEK for the KEK.
+
+
+
Uses an encrypted DEK (DEK encrypted with the KEK) to encrypt the record.
+
+
+
Replaces the original record with a ciphertext record (encrypted record, encrypted DEK, and metadata).
+
+
+
+
+
The filter uses a DEK reuse strategy. Encrypted records are sent to the same topic using the same DEK until a time-out or an encryption limit is reached.
+
+
+
+
1.2. How the filter decrypts records
+
+
The filter decrypts records from fetch responses as follows:
+
+
+
+
+
Filter receives a cipher record from the Kafka broker.
+
+
+
Reverses the process that constructed the cipher record.
+
+
+
Uses KMS to decrypt the DEK.
+
+
+
Uses the decrypted DEK to decrypt the encrypted record.
+
+
+
Replaces the cipher record with a decrypted record.
+
+
+
+
+
The filter uses an LRU (least recently used) strategy for caching decrypted DEKs. Decrypted DEKs are kept in memory to reduce interactions with the KMS.
+
+
+
+
1.3. How the filter uses the KMS
+
+
To support the filter, the KMS provides the following:
+
+
+
+
+
A secure repository for storing Key Encryption Keys (KEKs)
+
+
+
A service for generating and decrypting Data Encryption Keys (DEKs)
+
+
+
+
+
KEKs stay within the KMS. The KMS generates a DEK (which is securely generated random data) for a given KEK, then returns the DEK and an encrypted DEK. The encrypted DEK has the same data but encrypted with the KEK. The KMS doesn’t store encrypted DEKs; they are stored as part of the cipher record in the broker.
+
+
+
+
+
+
+
The KMS must be available during runtime. If the KMS is unavailable, the filter will not be able to obtain new encrypted DEKs on the produce path or decrypt encrypted DEKs on the consume path. The filter will continue to use previously obtained DEKs, but eventually, production and consumption will become impossible. It is recommended to use the KMS in a high availability (HA) configuration.
+
+
+
+
+
+
+
1.4. Practicing key rotation
+
+
Key rotation involves periodically replacing cryptographic keys with new ones and is considered a best practice in cryptography.
+
+
+
The filter allows the rotation of Key Encryption Keys (KEKs) within the Key Management System (KMS). When a KEK is rotated, the new key material is eventually used for newly produced records. Existing records, encrypted with older KEK versions, remain decryptable as long as the previous KEK versions are still available in the KMS.
+
+
+
+
+
+
+
If your encrypted topic is receiving regular traffic, the Data Encryption Key (DEK) will be refreshed as new records flow through. However, if messages are infrequent, the DEK might be used for up to 2 hours (by default) after its creation.
+
+
+
+
+
+
When the KEK is rotated in the external KMS, it will take up to 1 hour (by default) before all records produced by the filter contain a DEK encrypted with the new key material. This is because the filter caches each encrypted DEK and uses it for a configurable amount of time after creation; by default, an encrypted DEK becomes eligible to be refreshed one hour after it is created.
+
+
+
If you need to rotate key material immediately, execute a rolling restart of your cluster of Kroxylicious instances.
+
+
+
+
+
+
+
If an old KEK version is removed from the KMS, records encrypted with that key will become unreadable, causing fetch operations to fail. In such cases, the consumer offset must be advanced beyond those records.
+
+
+
+
+
+
+
1.5. What part of a record is encrypted?
+
+
The record encryption filter encrypts only the values of records, leaving record keys, headers, and timestamps untouched. Null record values, which might represent deletions in compacted topics, are transmitted to the broker unencrypted. This approach ensures that compacted topics function correctly.
+
+
+
+
1.6. Unencrypted topics
+
+
You may configure the system so that some topics are encrypted and others are not. This supports scenarios where topics with confidential information are encrypted and Kafka topics with non-sensitive information can be left unencrypted.
This section assumes that you already have a supported KMS instance up and running. It describes how to prepare the KMS for use with the filter.
+
+
+
2.1. Preparing HashiCorp Vault
+
+
To use HashiCorp Vault with the Record Encryption filter, use the following setup:
+
+
+
+
+
Enable the Transit Engine as the Record Encryption filter relies on its APIs.
+
+
+
Create a Vault policy specifically for the filter with permissions for generating and decrypting Data Encryption Keys (DEKs) for envelope encryption.
+
+
+
Obtain a Vault token that includes the filter policy.
+
+
+
+
+
2.1.1. Enable the Transit Engine
+
+
The filter integrates with the HashiCorp Vault Transit Engine. Vault does not enable the Transit Engine by default. It must be enabled before it can be used by the filter.
+
+
+
Vault Transit Engine URL
+
+
The Vault Transit Engine URL is required so the filter knows the location of the Transit Engine within the Vault instance.
+
+
+
The URL is formed from the concatenation of the Api Address (reported by Vault during startup) with the complete path to Transit Engine, including the name of the engine itself.
+
+
+
If Namespacing is used on the Vault instance, the path must include the namespaces. The URL will end with /transit unless the -path parameter was used when enabling the engine.
+
+
+
If namespacing is not in use, the URL looks like this:
+
+
+
+
https://myvaultinstance:8200/v1/transit
+
+
+
+
If namespacing is in use, the path must include the namespaces. For example, if the parent namespace is a and the child namespace is b, the URL looks like this:
+
+
+
+
https://myvaultinstance:8200/v1/a/b/transit
+
+
+
+
If the name of the Transit engine was changed (using the -path argument to the vault secrets enable transit command) the URL looks like this:
+
+
+
+
https://myvaultinstance:8200/v1/mytransit
+
+
+
+
+
Role of the administrator
+
+
To use the filter, an administrator or an administrative process must create the encryption keys within Vault, which are used by the envelope encryption process.
+
+
+
The organization deploying the Record Encryption filter is responsible for managing this administrator or process.
+
+
+
The administrator must have permissions to create keys beneath transit/keys/KEK-* in the Vault hierarchy.
+
+
+
As a guideline, the minimal Vault policy required by the administrator is as follows:
The filter must authenticate to Vault in order to perform envelope encryption operations, such as generating and decrypting DEKs. Therefore, a Vault identity with sufficient permissions must be created for the filter.
The -period switch causes the token to be periodic (with every renewal using the given period).
+
+
+
The -no-default-policy switch detaches the policy from the default policy. This is done so the token has least-privilege.
+
+
+
The -orphan switch creates the token with no parent. This is done so that expiration of a parent won’t expire the token used by the filter.
+
+
+
+
+
+
+
+
+
The example token create command illustrates the use of -no-default-policy and -orphan. The use of these flags is not functionally important. You may adapt the configuration of the token to suit the standards required by your organization.
+
+
+
+
+
+
The token create command yields the token. The token value is required later when configuring the vault within the filter.
The script should respond Ok. If errors are reported check the policy/token configuration.
+
+
+
transit/keys/KEK-testkey can now be removed.
+
+
+
+
+
+
2.2. Preparing AWS KMS
+
+
To prepare AWS Key Management Service for use with the Record Encryption filter, use the following setup:
+
+
+
+
+
Establish an AWS KMS aliasing convention for keys
+
+
+
Create a policy giving permissions to the key aliases.
+
+
+
Create a user for use by the filter and attach the policies to it.
+
+
+
+
+
You’ll need a privileged AWS user that is capable of creating users and policies to perform the set-up.
+
+
+
2.2.1. Establish an aliasing convention for keys within AWS KMS
+
+
The filter references KEKs within AWS via an AWS key alias.
+
+
+
Establish a naming convention for key aliases to keep the filter’s keys separate from those used by other systems. Here, we use a prefix of KEK- for filter aliases. Adjust the instructions if a different naming convention is used.
+
+
+
Role of the administrator
+
+
To use the filter, an administrator or an administrative process must create the encryption keys within AWS KMS, which are used by the envelope encryption process.
+
+
+
The organization deploying the Record Encryption filter is responsible for managing this administrator or process.
+
+
+
The administrator must have permissions to create keys in AWS KMS. As a starting point, the built-in AWS policy AWSKeyManagementServicePowerUser confers sufficient key management privileges.
+
+
+
To get started, use the following commands to set up an administrator with permissions suitable for managing encryption keys in KMS through an AWS Cloud Shell. This example illustrates using the user name kroxylicious-admin, but you can choose a different name if preferred. Adjust the instructions accordingly if you use a different user name.
+
+
+
+
ADMIN=kroxylicious-admin
+INITIAL_PASSWORD=$(aws secretsmanager get-random-password --output text)
+CONSOLE_URL=https://$(aws sts get-caller-identity --query Account --output text).signin.aws.amazon.com/console
+aws iam create-user --user-name ${ADMIN}
+aws iam attach-user-policy --user-name ${ADMIN} --policy-arn arn:aws:iam::aws:policy/AWSKeyManagementServicePowerUser
+aws iam attach-user-policy --user-name ${ADMIN} --policy-arn arn:aws:iam::aws:policy/IAMUserChangePassword
+aws iam attach-user-policy --user-name ${ADMIN} --policy-arn arn:aws:iam::aws:policy/AWSCloudShellFullAccess
+aws iam create-login-profile --user-name ${ADMIN} --password "${INITIAL_PASSWORD}" --password-reset-required
+echo Now log in at ${CONSOLE_URL} with user name ${ADMIN} password "${INITIAL_PASSWORD}" and change the password.
+
+
+
+
+
Create an alias-based policy for KEK aliases
+
+
Create an alias-based policy granting permissions to use keys aliased by the established alias naming convention.
Establish an authentication mechanism for the filter
+
+
The filter must authenticate to AWS in order to perform envelope encryption operations, such as generating and decrypting DEKs.
+
+
+
+
Authenticating using long-term IAM identity
+
+
This procedure describes how to create a long-term IAM identity for the Record Encryption filter to authenticate to AWS KMS. The process involves creating an IAM user and access key, and attaching an alias-based policy that grants permissions to perform KMS operations on specific KEKs.
+
+
+
+
+
+
+
Do not enable console access for this user. The filter requires only API access, and console access would unnecessarily increase the security risk.
+
+
+
+
+
+
Prerequisites
+
+
+
Access to the AWS CLI with sufficient permissions to create and manage IAM users.
This step grants the user permission to perform KMS operations on KEKs that use the alias naming convention defined in the KroxyliciousRecordEncryption policy.
+
+
+
+
Verify that the policy has been successfully attached:
+
+
+
aws iam list-attached-user-policies --user-name kroxylicious
+
+
+
+
+
+
+
+
Authenticating using AWS EC2 metadata
+
+
This procedure describes how to use AWS EC2 metadata for the Record Encryption filter to authenticate to AWS KMS. The process involves creating a trust policy, creating an IAM role, and attaching an alias-based policy that grants permissions to perform KMS operations on specific KEKs.
+
+
+
The filter authenticates using the temporary credentials retrieved from EC2 instance metadata.
+
+
+
Prerequisites
+
+
+
Access to the AWS CLI with sufficient permissions to create and manage IAM users.
This step grants the role permission to perform KMS operations on KEKs that use the alias naming convention defined in the KroxyliciousRecordEncryption policy.
+
+
+
+
Verify that the policy has been successfully attached:
+
+
+
aws iam list-attached-role-policies --role-name KroxyliciousInstance
This procedure describes how to use EKS Pod Identity to authenticate the Record Encryption filter to AWS KMS when Kroxylicious is running on Amazon EKS.
+
+
+
EKS Pod Identity is the AWS-recommended successor to IRSA. Compared with IRSA it does not require an OIDC trust-policy edit on the IAM role: instead an in-cluster Pod Identity Agent exchanges a projected service-account token for temporary AWS credentials and exposes them on a link-local HTTP endpoint inside each pod. The Record Encryption filter retrieves the credentials from that endpoint and refreshes them automatically before they expire.
Unlike IRSA there is no OIDC sub / aud condition to misconfigure — the binding from role to service account is established via an EKS pod-identity association in a later step instead.
+
+
+
+
Create the IAM role using the trust policy:
+
+
+
aws iam create-role --role-name KroxyliciousPodIdentity --assume-role-policy-document file://trustpolicy
The Pod Identity Agent automatically projects the service-account token and sets the environment variables AWS_CONTAINER_CREDENTIALS_FULL_URI and AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE on each new pod that uses the named service account. The Record Encryption filter reads these environment variables by default, so an empty podIdentityCredentials: {} block in the AWS KMS configuration is sufficient on a properly-associated pod.
+
+
+
+
Restart the Kroxylicious pods so that the agent can inject the projection:
Authenticating using IAM Roles for Service Accounts (IRSA)
+
+
This procedure describes how to use IAM Roles for Service Accounts (IRSA) to authenticate the Record Encryption filter to AWS KMS when Kroxylicious is running on Amazon EKS.
+
+
+
The filter authenticates by exchanging the projected service-account OIDC token for temporary credentials from AWS Security Token Service (STS) using the AssumeRoleWithWebIdentity action. The credentials are refreshed automatically before they expire.
The sub condition restricts the role to only the named service account, and the aud condition restricts to STS as the audience. This is the part of IRSA that is most often misconfigured — a mismatch here results in InvalidIdentityToken errors at runtime.
+
+
+
+
Create the IAM role using the trust policy:
+
+
+
aws iam create-role --role-name KroxyliciousIRSA --assume-role-policy-document file://trustpolicy
+
+
+
+
This example uses KroxyliciousIRSA as the role name, but you can substitute a different name if necessary.
+
+
+
+
Attach the alias-based policy to the role:
+
+
+
aws iam attach-role-policy --policy-arn arn:aws:iam::${AWS_ACCOUNT_ID}:policy/KroxyliciousRecordEncryption --role-name KroxyliciousIRSA
+
+
+
+
+
Annotate the Kroxylicious service account so that the EKS pod-identity webhook injects the role ARN and projected token at pod-creation time:
The webhook automatically projects the OIDC token at /var/run/secrets/eks.amazonaws.com/serviceaccount/token and sets the environment variables AWS_ROLE_ARN and AWS_WEB_IDENTITY_TOKEN_FILE on the pod. The Record Encryption filter reads these environment variables by default, so an empty webIdentityCredentials: {} block in the AWS KMS configuration is sufficient on a properly-annotated pod.
+
+
+
+
Restart the Kroxylicious pods so that the webhook can inject the projection:
To prepare Azure Key Vault for use with the Record Encryption filter, use the following setup:
+
+
+
+
+
Setup Azure resources
+
+
+
Establish a naming convention for keys
+
+
+
+
+
You’ll need a privileged Azure user that is capable of creating users and resources to perform the set-up.
+
+
+
2.3.1. Setting up Azure resources
+
+
This procedure describes how to prepare the Azure resources required to use the Kroxylicious Record Encryption filter with Azure Key Vault. This process uses the Azure CLI to create a resource group, provision a key vault, configure a key management user, and establish an authentication method that the filter will use to access the key vault.
+
+
+
Prerequisites
+
+
+
An Azure subscription that includes Azure Key Vault
+
+
+
An Azure user with sufficient permissions to create and manage users, resource groups, roles, and resources
+
+
+
+
+
Procedure
+
+
+
Create a resource group
+
+
Before you create a key vault, you must create a resource group to contain it.
+
+
+
If you deploy Kroxylicious into multiple different environments, you must have separate resource groups and key vaults for each environment.
+
+
+
+
+
+
+
Resource groups are tied to the region they were created in. While resources (such as key vaults) within a resource group can be deployed to any region, Microsoft recommends that they should be located in the same region as the resource group. Keep this in mind when choosing a region for your resource groups.
+
+
+
+
+
+
If using the Azure CLI, you can create a resource group with a command like this:
+
+
+
+
az group create --name "my-resource-group" --location "eastus"
RESOURCE_GROUP_NAME should match the name of the resource group created in the previous section.
+
+
There are several methods to create key vault resources in Azure. This step describes how to do this using the Azure CLI. The Microsoft Azure documentation has further instructions on how to create key vaults via the Azure portal, Azure CLI, and Azure PowerShell.
+
+
+
+
+
+
+
Microsoft’s best practice guide for Azure Key Vault recommends having one key vault per application, per environment, per region. For example, if you have Kroxylicious deployed in your development, test, and production environments, and each of those environments is deployed across two different regions, you would create six key vaults for Kroxylicious — one for each Kroxylicious deployment in each environment in each region.
+
+
+
+
+
+
+
+
+
+
Create a key management user.
+
+
To use the filter, a user such as the Key Vault Owner or any user with the Key Vault Crypto Officer RBAC role must create the encryption keys in the key vault. These keys are used by the envelope encryption process.
+
+
+
When you create a key vault, only the creator (the Key Vault Owner) can manage it. Access to key vaults is controlled through RBAC roles. A user with either the Role Based Access Control Administrator role or the User Access Administrator role can grant those roles to others. For information on exceptions, see the Microsoft documentation.
+
+
+
The following Azure CLI commands demonstrate creating a user in Microsoft Entra ID and granting key management RBAC privileges. This example illustrates using the domain example.com but you must use your own domain, such as a *.onmicrosoft.com domain. Substitute the value in the USER_NAME field (kroxylicious-user) for a different username if preferred.
+
+
+
+
KEY_VAULT_NAME="kroxylicious-key-vault"
+RESOURCE_GROUP_NAME="my-resource-group"
+
+SUBSCRIPTION_ID=$(az account show --query id --output tsv)
+PORTAL_URL="$(az cloud show --query endpoints.portal --output tsv)/$(az account show --query tenantId --output tsv)"
+
+USER_NAME="kroxylicious-user"
+DEFAULT_DOMAIN=$(az rest --method get --url https://graph.microsoft.com/v1.0/domains --query "value[?isDefault].id" -o tsv)
+USER_PRINCIPAL_NAME="${USER_NAME}@${DEFAULT_DOMAIN}"
+INITIAL_PASSWORD=$(tr -dc 'A-Za-z0-9!?%=' < /dev/urandom | head -c 16)
+
+# Create user
+az ad user create \
+ --display-name "${USER_NAME}" \
+ --user-principal-name "${USER_PRINCIPAL_NAME}" \
+ --password "${INITIAL_PASSWORD}" \
+ --force-change-password-next-sign-in true
+
+# Create RBAC role assignment
+az role assignment create \
+ --role "Key Vault Crypto Officer" \
+ --assignee ${USER_PRINCIPAL_NAME} \
+ --scope "/subscriptions/${SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_NAME}/providers/Microsoft.KeyVault/vaults/${KEY_VAULT_NAME}"
+
+echo "Now log in at ${PORTAL_URL} with user name ${USER_PRINCIPAL_NAME} and password \"${INITIAL_PASSWORD}\" and change the password."
+
+
+
+
+
+
View all subscriptions for the logged-in user with az account list, and change the active subscription with az account set.
+
+
+
The Azure portal URL varies depending on your current Azure cloud.
+
+
+
The Key Vault Crypto Officer role lets a user perform any action on the keys within the key vault identified in the --scope, except managing permissions.
+
+
+
The scope in the role assignment command specifies the full scope hierarchy of the key vault. For more information on this format, see the Microsoft documentation.
+
+
+
+
+
+
Establish an authentication mechanism for the filter.
+
+
The filter must authenticate to Azure Key Vault in order to perform envelope encryption operations, such as generating and decrypting DEKs. See the following procedures for details of the authentication methods available.
+
+
+
+
+
+
Authenticating with Microsoft Identity Platform via OAuth 2.0
+
+
This procedure describes how to create a Service Principal (an application identity) within Microsoft Entra ID to allow the Record Encryption filter to authenticate to Azure Key Vault using the OAuth 2.0 Client Credentials flow. The process uses the Azure CLI to create the application identity and assign the required Azure Role-Based Access Control (RBAC) role.
+
+
+
+
+
+
+
This identity is used solely for application-to-application (machine-to-machine) authentication. Grant the Service Principal only the minimum permissions needed to access Azure Key Vault to avoid increasing security risk.
+
+
+
+
+
+
Prerequisites
+
+
+
Access to the Azure CLI
+
+
+
A user with permissions to create and manage service principals and RBAC roles
+
+
+
+
+
Procedure
+
+
+
Create the service principal and retrieve the credentials:
+
+
+
az ad sp create-for-rbac --name "kroxylicious" --query '[appId, password, tenant, appId]' --output tsv
+
+
+
+
This command creates the Service Principal and outputs, in order, the Client ID, Client Secret, Tenant ID, and App ID of the Service Principal. You require the App ID for the next step. You can replace kroxylicious with a different name.
+
+
+
+
Assign the built-in Azure Key Vault RBAC role to the service principal:
+
+
+
PRINCIPAL_ID="00000000-0000-0000-0000-000000000000"
+KEY_VAULT_NAME="kroxylicious-key-vault"
+RESOURCE_GROUP="my-resource-group"
+SCOPE_ID=$(az keyvault show --name ${KEY_VAULT_NAME} --resource-group ${RESOURCE_GROUP} --query "id" --output tsv)
+az role assignment create --assignee ${PRINCIPAL_ID} --role "Key Vault Crypto Service Encryption User" --scope ${SCOPE_ID}
+
+
+
+
+
+
Set PRINCIPAL_ID to the App ID output in the previous step.
+
+
+
Set RESOURCE_GROUP to the resource group where your key vault is deployed.
+
+
These commands assign the built-in Key Vault Crypto Service Encryption User role to the Service Principal, scoped to the target Key Vault. Replace KEY_VAULT_NAME and RESOURCE_GROUP with your actual values.
+
+
+
+
+
+
+
(Optional) If you do not use the built-in RBAC roles, assign the following permissions to the service principal instead:
+
+
+
+
Microsoft.KeyVault/vaults/keys/read
+
+
+
Microsoft.KeyVault/vaults/keys/wrap/action
+
+
+
Microsoft.KeyVault/vaults/keys/unwrap/action
+
+
If you are using Managed HSM, also assign Microsoft.KeyVault/managedhsms/rng/action.
+
+
+
+
+
+
+
Verify that the Service Principal has the correct role assignment and scope:
+
+
+
PRINCIPAL_ID="00000000-0000-0000-0000-000000000000"
+SCOPE_ID=$(az keyvault show --name ${KEY_VAULT_NAME} --resource-group ${RESOURCE_GROUP} --query "id" --output tsv)
+az role assignment list --assignee "${PRINCIPAL_ID}" --query '[].{Role:roleDefinitionName, Scope:scope}' --output tsv --scope ${SCOPE_ID}
+
+
+
+
+
+
PRINCIPAL_ID is the object identifier (Service Principal App ID) output in the first step.
+
+
Confirm that the output shows the Key Vault Crypto Service Encryption User role and the full resource ID of your key vault (for example, /subscriptions/…/resourceGroups/…/vaults/kroxylicious-key-vault).
+
+
+
+
+
+
+
+
+
+
Authenticating with Managed Identities for Azure resources
+
+
This procedure describes how to use a System-assigned Managed Identity (on the Azure resource where Kroxylicious is deployed) to allow the Record Encryption filter to authenticate to Azure Key Vault. The process uses the Azure CLI to enable the identity on the hosting resource and grant it the required Azure Role-Based Access Control (RBAC) role.
+
+
+
+
+
+
+
A Managed Identity is a type of Service Principal that Azure manages automatically and does not require credentials. Assigning RBAC roles to a Managed Identity follows the same process as for other Service Principals.
+
+
+
+
+
+
Prerequisites
+
+
+
Access to the Azure CLI
+
+
+
A user with sufficient permissions to manage the hosting resource and assign RBAC roles.
+
+
+
The Azure resource that hosts Kroxylicious must be provisioned in advance.
+
+
+
+
+
Procedure
+
+
+
Enable the system-assigned managed identity on the hosting resource:
Set HOST_RESOURCE_NAME to the name of the Azure resource that hosts Kroxylicious.
+
+
+
Replace <RESOURCE_TYPE> with the correct Azure CLI subcommand for your resource type (for example, vm).
+
+
+
The command outputs the identifier of the system-assigned managed identity. Use this identifier in the next step.
+
+
+
+
+
+
Assign the built-in Azure Key Vault RBAC role to the managed identity:
+
+
+
SYSTEM_ASSIGNED_ID="x.kroxylicious-host"
+KEY_VAULT_NAME="kroxylicious-key-vault"
+RESOURCE_GROUP="my-resource-group"
+SCOPE_ID=$(az keyvault show --name ${KEY_VAULT_NAME} --resource-group ${RESOURCE_GROUP} --query "id" --output tsv)
+az role assignment create --assignee ${SYSTEM_ASSIGNED_ID} --role "Key Vault Crypto Service Encryption User" --scope ${SCOPE_ID}
+
+
+
+
+
+
Set SYSTEM_ASSIGNED_ID to the managed identity’s identifier from the previous step.
+
+
+
Set RESOURCE_GROUP to the name of the resource group that contains your key vault. This may differ from the resource group that hosts Kroxylicious.
+
+
These commands assign the built-in Key Vault Crypto Service Encryption User role to the Managed Identity, using the target Key Vault as the scope. Replace HOST_RESOURCE_NAME, KEY_VAULT_NAME, and RESOURCE_GROUP with your actual values.
+
+
+
+
+
+
+
(Optional) If you do not use the built-in RBAC roles, assign the following permissions to the managed identity instead:
+
+
+
+
Microsoft.KeyVault/vaults/keys/read
+
+
+
Microsoft.KeyVault/vaults/keys/wrap/action
+
+
+
Microsoft.KeyVault/vaults/keys/unwrap/action
+
+
If you are using Managed HSM, also assign Microsoft.KeyVault/managedhsms/rng/action.
+
+
+
+
+
+
+
Confirm the role has been successfully assigned:
+
+
+
SYSTEM_ASSIGNED_ID="x.kroxylicious-host"
+az role assignment list --assignee ${SYSTEM_ASSIGNED_ID} --query '[].{Role:roleDefinitionName, Scope:scope}' --output tsv
+
+
+
+
The output lists the Key Vault Crypto Service Encryption User role and the full resource ID of your key vault.
2.4. Preparing Fortanix Data Security Manager (DSM)
+
+
To prepare Fortanix Data Security Manager (DSM) for use with the Record Encryption filter, use the following setup:
+
+
+
+
+
Establish a naming convention for keys and choose a Fortanix group where the keys will reside.
+
+
+
Create an application identity, with an API key, for the Record Encryption filter.
+
+
+
+
+
2.4.1. Integrate with Fortanix DSM
+
+
The filter integrates with the Fortanix Data Security Manager (DSM). Both Fortanix DSM software-as-a service (SaaS) or an on-premise installation are supported.
+
+
+
These instructions assume that you are using the Fortanix DSM CLI, but you can use the Fortanix DSM user interface if preferred.
+
+
+
+
+
+
+
The Fortanix KMS plugin for Record Encryption doesn’t yet support keys in the Deactivated state. For more information, see the related issue.
+
+
+
+
+
+
Fortanix DSM Cluster URL
+
+
The Record Encryption filter requires the URL of the Fortanix DSM cluster.
If using an on-premises instance, talk to the group responsible for it within your organization to find out the URL you should use.
+
+
+
+
Establish a naming convention for keys within Fortanix DSM
+
+
Establish a naming convention for keys to keep the filter’s keys separate from those used by other systems. Here, we use a prefix of KEK- for filter key names.
+
+
+
Choose the Fortanix DSM groups to keep the keys. Here, we assume a group name of topic-keks.
+
+
+
Adjust the instructions if a different naming convention is used.
+
+
+
+
Role of the administrator
+
+
To use the filter, an administrator or an administrative process must create the encryption keys within Fortanix DSM, which are used by the envelope encryption process.
+
+
+
The organization deploying the Record Encryption filter is responsible for managing this administrator or process.
+
+
+
The administrator must have permissions to create keys with Fortanix DSM.
+
+
+
+
Establish an application identity for the filter
+
+
The filter must authenticate to Fortanix DSM in order to perform the encryption and decryption operations.
+
+
+
Create a Fortanix DSM App with sufficient permissions for the filter:
The Record Encryption filter uses the API Key in its KMS configuration to authenticate to Fortanix DSM.
+
+
+
+
+
+
+
+
3. Configuring the Record Encryption filter
+
+
+
This section describes at a high level how to configure the Record Encryption filter using a previously prepared KMS. Subsections provide in-depth details.
For HashiCorp Vault, the KMS configuration used by the filter looks like this. Use the Vault Token and Vault Transit Engine URL values from the KMS setup.
kms specifies the name of the KMS provider. Use AwsKmsService.
+
+
+
kmsConfig specifies the configuration of the KMS service.
+
+
+
+
endpointUrl specifies the AWS KMS endpoint URL, which must include the https:// scheme.
+
+
+
credentials groups the authentication configuration.
+
+
+
+
longTerm configures the KMS to authenticate with long-term credentials.
+
+
+
+
accessKeyId references a file containing the AWS access key ID.
+
+
+
secretAccessKey references a file containing the AWS secret access key.
+
+
+
+
+
+
+
+
+
region defines the AWS region identifier where KMS resources are located, such as us-east-1. This must match the region of the KMS endpoint you’re using.
+
+
+
+
+
+
+
+
+
+
+
+
The legacy top-level longTermCredentials key is deprecated but continues to work for backward compatibility. New configurations should use credentials.longTerm instead.
+
+
+
+
+
+
Alternatively, the configuration for authenticating with EC2 metadata looks like this:
+
+
+
Configuration for authenticating with EC2 metadata
kms specifies the name of the KMS provider. Use AwsKmsService.
+
+
+
kmsConfig specifies the configuration of the KMS service.
+
+
+
+
endpointUrl AWS KMS endpoint URL, which must include the https:// scheme.
+
+
+
credentials groups the authentication configuration.
+
+
+
+
ec2Metadata configures the KMS to authenticate using EC2 metadata.
+
+
+
+
iamRole specifies the name of the IAM role associated with the EC2 instances hosting Kroxylicious.
+
+
+
metadataEndpoint (Optional) specifies the metadata endpoint used to obtain EC2 metadata. Defaults to http://169.254.169.254/. If using IPv6, use http://[fd00:ec2::254] instead.
+
+
+
credentialLifetimeFactor (Optional) defines the factor used to determine when to refresh a credential before it expires. Defaults to 0.8, which means the credential is refreshed once it reaches 80% of its lifetime.
+
+
+
+
+
+
+
+
+
region defines the AWS region identifier where KMS resources are located, such as us-east-1. This must match the region of the KMS endpoint you’re using.
+
+
+
+
+
+
+
+
+
+
+
+
The legacy top-level ec2MetadataCredentials key is deprecated but continues to work for backward compatibility. New configurations should use credentials.ec2Metadata instead.
+
+
+
+
+
+
Alternatively, the configuration for authenticating with EKS Pod Identity looks like this:
+
+
+
Configuration for authenticating with EKS Pod Identity
kms specifies the name of the KMS provider. Use AwsKmsService.
+
+
+
kmsConfig specifies the configuration of the KMS service.
+
+
+
+
endpointUrl AWS KMS endpoint URL, which must include the https:// scheme.
+
+
+
credentials groups the authentication configuration.
+
+
+
+
podIdentity configures the KMS to authenticate using the EKS Pod Identity agent. All sub-fields are optional and default to the values injected by the agent webhook, so on a properly-associated pod the entire block can be written as credentials: { podIdentity: {} }.
+
+
+
+
credentialsFullUri (Optional) URL of the Pod Identity Agent credentials endpoint. Defaults to the AWS_CONTAINER_CREDENTIALS_FULL_URI environment variable.
+
+
+
authorizationTokenFile (Optional) path to the projected service-account token file used to authenticate to the agent. Defaults to the AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable.
+
+
+
credentialLifetimeFactor (Optional) defines the factor used to determine when to refresh a credential before it expires. Defaults to 0.8, which means the credential is refreshed once it reaches 80% of its lifetime.
+
+
+
+
+
+
+
+
+
region defines the AWS region identifier where KMS resources are located, such as us-east-1. This must match the region of the KMS endpoint you’re using.
+
+
+
+
+
+
+
+
Alternatively, the configuration for authenticating with IAM Roles for Service Accounts (IRSA) on Amazon EKS looks like this:
+
+
+
Configuration for authenticating with IAM Roles for Service Accounts (IRSA)
kms specifies the name of the KMS provider. Use AwsKmsService.
+
+
+
kmsConfig specifies the configuration of the KMS service.
+
+
+
+
endpointUrl AWS KMS endpoint URL, which must include the https:// scheme.
+
+
+
credentials groups the authentication configuration.
+
+
+
+
webIdentity configures the KMS to authenticate using a Kubernetes service-account OIDC token via STS AssumeRoleWithWebIdentity. All sub-fields are optional and default to the values injected by the EKS pod-identity webhook, so on a properly-annotated pod the entire block can be written as credentials: { webIdentity: {} }.
+
+
+
+
roleArn (Optional) ARN of the IAM role to assume. Defaults to the AWS_ROLE_ARN environment variable.
+
+
+
webIdentityTokenFile (Optional) path to the projected service-account OIDC token. Defaults to the AWS_WEB_IDENTITY_TOKEN_FILE environment variable.
+
+
+
roleSessionName (Optional) the session name reported to AWS CloudTrail. Defaults to the AWS_ROLE_SESSION_NAME environment variable, falling back to a generated kroxylicious-<uuid> value.
+
+
+
stsEndpointUrl (Optional) the STS endpoint URL to call. Defaults to https://sts.<region>.amazonaws.com derived from the surrounding region value. Override for non-standard partitions (e.g. China: sts.<region>.amazonaws.com.cn).
+
+
+
durationSeconds (Optional) requested duration of the assumed-role session. When omitted, STS uses the role’s configured maximum session duration.
+
+
+
credentialLifetimeFactor (Optional) defines the factor used to determine when to refresh a credential before it expires. Defaults to 0.8, which means the credential is refreshed once it reaches 80% of its lifetime.
+
+
+
+
+
+
+
+
+
region defines the AWS region identifier where KMS resources are located, such as us-east-1. This must match the region of the KMS endpoint you’re using.
+
+
+
+
+
+
+
+
A TLS client certificate can be specified using a PKCS#12 or JKS key store file.
+
+
+
Example TLS client certificate configuration using a PKCS#12 key store file
kms specifies the name of the KMS provider. Use FortanixDsmKmsService.
+
+
+
endpointUrl provides the Fortanix DSM Cluster URL including the protocol part, such as https: or http:.
+
+
+
passwordFile references a file containing the API key.
+
+
+
+
+
+
3.5. Filter configuration
+
+
This procedure describes how to configure the Record Encryption filter. Provide the filter configuration and the Key Encryption Key (KEK) selector to use. The KEK selector maps topic name to key names. The filter looks up the resulting key name in the KMS.
kmsConfig specifies configuration specific to the KMS provider.
+
+
+
selector specifies the Key Encryption Key (KEK) selector to use. The $(topicName) is a literal understood by the proxy. For example, if using the TemplateKekSelector with the template KEK-$(topicName), create a key for every topic that is to be encrypted with the key name matching the topic name, prefixed by the string KEK-.
+
+
+
+
template specifies the template for deriving the KEK, based on a specific topic name.
+
+
+
+
+
+
unresolvedKeyPolicy specifies policy governing the behaviour when the KMS does not contain a key. The default is PASSTHROUGH_UNENCRYPTED which causes the record to be forwarded, unencrypted, to the target cluster. Users can alternatively specify REJECT which will cause the entire produce request to be rejected. If all traffic sent to the virtual cluster must be encrypted, this is a safer alternative because unencrypted data is never forwarded.
encryptionDekRefreshAfterWriteSeconds specifies how long after creation of a DEK before it becomes eligible for rotation. On the next encryption request, the cache asynchronously creates a new DEK. Encryption requests continue to use the old DEK until the new DEK is ready.
+
+
+
encryptionDekExpireAfterWriteSeconds specifies how long after creation of a DEK until it is removed from the cache. This setting puts an upper bound on how long a DEK can remain cached.
+
+
+
maxEncryptionsPerDek specifies the maximum number of records any DEK can encrypt. After this limit is reached, that DEK is destroyed and a new DEK is created.
+
+
encryptionDekRefreshAfterWriteSeconds and encryptionDekExpireAfterWriteSeconds properties govern the originator usage period of the DEK, which is the amount of time the DEK remains valid for encrypting records. Shortening this period helps limit the impact if the DEK key material is leaked. However, shorter periods increase the number of KMS API calls, which might affect produce and consume latency and raise KMS provider costs.
+
+
+
maxEncryptionsPerDek helps prevent key exhaustion by placing an upper limit on the number of times that a DEK may be used to encrypt records.
+
+
+
+
+
+
+
+
+
+
Verify that the encryption has been applied to the specified topics by producing messages through the proxy and then consuming directly and indirectly from the Kafka cluster.
+
+
+
+
+
+
+
+
+
If the filter is unable to find the key in the KMS, the filter passes through the records belonging to that topic in the produce request without encrypting them.
+
+
+
+
+
+
+
3.6. Example proxy configuration file
+
+
If your instance of the Kroxylicious Proxy runs directly on an operating system, provide the filter configuration in the filterDefinitions list of your proxy configuration. Here’s a complete example of a filterDefinitions entry configured for record encryption with Vault KMS:
This section assumes that you already have a supported KMS instance up and running and have configured the Record Encryption filter. It describes how to create KMS keys for encrypting records sent to topics.
+
+
+
4.1. Creating keys in HashiCorp Vault
+
+
To create a key in HashiCorp Vault for use with the Record Encryption filter, use the following procedure.
+
+
+
4.1.1. Creating HashiCorp Vault keys
+
+
This procedure describes how to create AES-256 symmetric keys for use with the Record Encryption filter. The procedure establishes a naming convention for keys, then uses the HashiCorp CLI to create a key with an optional rotation policy.
+
+
+
Prerequisites
+
+
+
Access to the HashiCorp Vault CLI with permissions to create keys.
+
+
+
Familiarity with basic HashiCorp Vault Transit Engine key management operations.
+
+
+
+
+
Procedure
+
+
+
Establish a naming convention for keys to ensure that the filter’s keys remain separate from those used by other systems.
+
+
In this example, a KEK- prefix is used for filter key names. Adjust the naming convention as needed.
The key type must be aes256-gcm96, which is the default type for the Transit Engine.
+
+
+
(Optional) The auto_rotate_period parameter enables automatic 90-day key rotation. Adjust the rotation period if required.
+
+
+
+
+
+
+
+
+
+
+
+
It is recommended to use a key rotation policy.
+
+
+
+
+
+
+
+
4.2. Creating keys in AWS KMS
+
+
To create a key in AWS Key Management Service for use with the Record Encryption filter, use the following procedure.
+
+
+
You’ll need a privileged AWS user that is capable of creating keys.
+
+
+
4.2.1. Creating AWS KMS keys
+
+
This procedure describes how to create a Symmetric key with encrypt and decrypt usage for the Record Encryption filter. The procedure uses the AWS CLI to create a key and, optionally, apply a rotation policy.
+
+
+
+
+
+
+
Multi-region keys are supported.
+
+
+
+
+
+
Prerequisites
+
+
+
Access to the AWS CLI with sufficient permissions to create and manage keys.
To create a key in Azure Key Vault for use with the Record Encryption filter, use the following procedure.
+
+
+
You’ll need a privileged Azure user that is capable of creating key resources to perform the set-up.
+
+
+
4.3.1. Creating Azure Key Vault keys
+
+
This procedure describes how to create a key with wrapKey and unwrapKey operations enabled for use with the Record Encryption filter. The procedure uses the Azure CLI to create a key and, optionally, apply a rotation policy.
+
+
+
Prerequisites
+
+
+
Access to the Azure CLI.
+
+
+
A user with the Key Vault Crypto Officer role or equivalent RBAC permissions to manage keys in Azure Key Vault.
+
+
+
Familiarity with basic Azure Key Vault key management operations.
+
+
+
+
+
Procedure
+
+
+
Establish a naming convention for keys within key vault for easy identification and use with the Record Encryption filter. Use separate key vaults per application, environment, and region. This ensures separation between the filter’s keys and those used by other systems.
+
+
+
Select a key type.
+
+
+
+
+
+
Available key types depend on your Azure Key Vault service tier. See the Azure Key Vault documentation for background information on key types.
+
+
+
+
+
+
The filter accepts the following key types and their associated wrapping algorithms:
+
+
+
+
+
Symmetric Keys (oct or oct-HSM): Uses the quantum-resistant A256GCM (256-bit AES GCM) wrapping algorithm, which requires a Managed HSM subscription.
+
+
+
Asymmetric Keys (RSA or RSA-HSM): Uses the RSA-OAEP-256 (RSAES-OAEP with SHA-256 hash and MGF1/SHA-256 mask) wrapping algorithm, which is not quantum-resistant. This wrapping algorithm does not require a Managed HSM subscription to use (however, RSA-HSM is not available without a premium Azure Key Vault subscription).
4.4. Creating keys in Fortanix Data Security Manager (DSM)
+
+
To create a key in Fortanix Data Security Manager (DSM) for use with the Record Encryption filter, use the following procedure.
+
+
+
4.4.1. Creating Fortanix DSM keys
+
+
This procedure describes how to create AES-256 symmetric keys for use with the Record Encryption filter. The procedure uses the Fortanix DSM CLI to create a key according to your naming convention and assign it to the required group.
+
+
+
Prerequisites
+
+
+
Access to the Fortanix DSM CLI with sufficient permissions to create keys and list groups.
+
+
+
The ID of the Fortanix DSM group that will contain the keys.
+
+
+
Familiarity with basic Fortanix DSM key management operations.
Specify the key operations as ENCRYPT,DECRYPT,APPMANAGEABLE. These are the minimal permissions required for record encryption to function.
+
+
+
+
+
+
(Optional) Enable a key rotation policy. Details can be found in the Fortanix DSM documentation.
+
+
+
+
+
+
+
+
+
5. Monitoring the Record Encryption filter
+
+
+
This section describes how to monitor the Record Encryption filter.
+
+
+
5.1. Record Encryption filter metrics
+
+
The filter emits metrics that provide insights into its interactions with the configured KMS. They indicate the load the filter places on the KMS infrastructure and how often its interactions with the KMS fail.
+
+
+
The filter emits metrics that count the number of records that are being encrypted. This can help you verify that the filter is configured properly and encrypting specific topics as intended.
+
+
+
These metrics are made available automatically once metrics are enabled in the proxy.
+
+
+
5.1.1. KMS metrics
+
+
KMS metrics track and count the following types of interactions:
+
+
+
+
+
Generating DEK pairs
+
+
+
Decrypting EDEKs
+
+
+
Resolving KEK aliases
+
+
+
+
+
Table 1. KMS metrics
+
+
+
+
+
+
+
+
+
Metric Name
+
Type
+
Labels
+
Description
+
+
+
+
+
+
kroxylicious_kms_operation_attempt_total
+
+
+
Counter
+
+
+
operation
+
+
+
Count of the number of KMS operations attempted.
+
+
+
+
+
kroxylicious_kms_operation_outcome_total
+
+
+
Counter
+
+
+
operation, outcome
+
+
+
Count of the number of KMS operations grouped by outcome.
+
+
+
+
+
+
Table 2. Labels used on the KMS metrics
+
+
+
+
+
+
+
+
Label
+
Domain
+
Description
+
+
+
+
+
+
operation
+
+
+
generate_dek_pair, decrypt_edek, resolve_alias
+
+
+
Type of operation performed.
+
+
+
+
+
outcome
+
+
+
SUCCESS, EXCEPTION, NOT_FOUND
+
+
+
Result of the operation.
+
+
+
+
+
+
+
5.1.2. Encryption accounting metrics
+
+
Encryption accounting metrics count the number of records sent to topics that are encrypted and the number of records sent to topics that are not configured for encryption. These metrics are discriminated by topic name. Use these metrics to confirm your configuration is having the effect you desired.
Count of the number of records not encrypted by the filter.
+
+
+
+
+
+
+
+
+
+
6. Operations
+
+
+
This section documents the operational aspects of using the record encryption filter.
+
+
+
6.1. Handling lost KEKs
+
+
This section describes how to recover or mitigate the loss of a Key Encryption Key (KEK) required for decryption.
+
+
+
A KEK is considered lost if it is no longer usable for decryption even though the Key Management System (KMS) remains accessible to the proxy. For example, the key might be scheduled for deletion or in an invalid state.
+
+
+
+
+
+
+
Do not delete KEKs from your KMS. Determining which KEKs are still required for decryption is complex and error-prone. If a KEK is deleted while encrypted records still depend on it, those records become unrecoverable. As a result, consuming applications will encounter errors and stop processing unless additional action is taken. Only follow the procedures in this section if absolutely necessary.
+
+
+
+
+
+
When a consumer attempts to fetch a record that cannot be decrypted, the proxy returns an error. The exact error depends on the Kafka client library:
+
+
+
+
Apache Kafka client
+
+
Unexpected error code 91 while fetching at offset n from topic-partition <topic>-<partition>
+
+
librdkafka-based client
+
+
Fetch from broker 0 failed at offset n (leader epoch 0): Broker: Request illegally referred to resource that does not exist
+
+
+
+
+
These errors indicate that the KEK required for decryption is missing. Error code 91 (RESOURCE_NOT_FOUND) is returned by the Record Encryption filter when the KEK is unavailable.
+
+
+
To confirm the issue, check the proxy logs for entries like the following:
+
+
+
+
Failed to decrypt record in topic-partition <topic>-<partition> owing to key not found condition.
+This will be reported to the client as a RESOURCE_NOT_FOUND(91).
+Client may see a message like 'Unexpected error code 91 while fetching at offset' (java) or 'Request illegally referred to resource that does not exist' (librdkafka).
+Cause message: key 'd691a642-d8b4-4445-b668-d390df7000bb' is not found (AWS error: ErrorResponse{type='KMSInvalidStateException', message='arn:aws:kms:us-east-1:000000000000:key/d691a642-d8b4-4445-b668-d390df7000bb is pending deletion.'}).
+Raise log level to DEBUG to see the stack.
+
+
+
+
If you confirm that a KEK is lost, take one of the following actions:
+
+
+
+
+
Cancel key deletion
+
+
+
Restore key from backup
+
+
+
Delete or skip affected records
+
+
+
+
+
The actions are listed in recommended order to help restore record consumption. After applying any of the strategies, restart all proxy instances to resume consuming records.
+
+
+
6.1.1. Cancel key deletion
+
+
Some KMS providers schedule keys for deletion instead of deleting them immediately. During this time, the key appears unavailable but can still be recovered:
+
+
+
+
+
Use your KMS console or API to check if the missing key is scheduled for deletion.
+
+
+
If so, cancel the deletion to restore the key.
+
+
+
+
+
Refer to the documentation of the KMS for more details.
+
+
+
+
6.1.2. Restore key from backup
+
+
If the key was backed up, restore it from the backup:
+
+
+
+
+
Use your KMS’s backup and restore tools to recover the KEK.
+
+
+
Ensure that you also restore the original key metadata, such as the key identifier. The Record Encryption filter uses the identifier to reference the KEK in cipher text records.
+
+
+
+
+
+
+
+
+
Restoring the key material alone does not ensure compatibility with encrypted records. You must also recover related metadata, such as the key identifier, to resume successful decryption.
+
+
+
+
+
+
+
6.1.3. Delete or skip affected records
+
+
If the KEK cannot be recovered, you must do one of the following:
+
+
+
+
+
Delete the encrypted records
+
+
+
Advance consumer group offsets to skip the affected records
+
+
+
+
+
The process is as follows:
+
+
+
+
+
Identify the earliest offset after which all records can be successfully decrypted.
+
+
Proxy instances may not switch to a new KEK at the same time, so records encrypted with different keys might appear together in the log. As a result, there may be no single offset where encryption clearly transitions from one KEK to the next.
+
+
+
Use kafka-console-consumer.sh with a binary search approach to find the lowest offset for each affected topic partition where decryption succeeds. Domain-specific knowledge can help narrow the search.
+
+
+
+
Use the new starting offset for each affected topic partition to do one of the following:
+
+
+
+
Delete records using kafka-delete-records.sh
+
+ This tool deletes all records up to the specified offset, including any that may still be readable.
+
+
+
Advance consumer group offsets using kafka-consumer-groups.sh
+
+ You must reset offsets for every consumer group that must skip the records that cannot be decrypted.
+
+
+
+
+
+
+
+
+
+
+
+
7. Trademark notice
+
+
+
+
+
Apache Kafka is a registered trademark of The Apache Software Foundation.
+
+
+
Kubernetes is a registered trademark of The Linux Foundation.
+
+
+
Prometheus is a registered trademark of The Linux Foundation.
+
+
+
Strimzi is a trademark of The Linux Foundation.
+
+
+
Hashicorp Vault is a registered trademark of HashiCorp, Inc.
+
+
+
AWS Key Management Service is a trademark of Amazon.com, Inc. or its affiliates.
+
+
+
Microsoft, Azure, and Microsoft Entra are trademarks of the Microsoft group of companies.
+
+
+
Fortanix and Data Security Manager are trademarks of Fortanix, Inc.
+
+
+
+
+
+
+
8. Glossary
+
+
+
Glossary of terms used in the Record Encryption guide.
+
+
+
+
DEK
+
+
Data Encryption Key, a secret key used to encrypt the kafka records
+
+
KEK
+
+
Key Encryption Key, a secret key that resides within the boundaries of a KMS.
+
+
KMS
+
+
Key Management System. A dedicated system for controlling access to cryptographic material, and providing operations which use that material.
+
+
+
+
+
+
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/record-encryption-guide/index.html b/documentation/0.21.0/html/record-encryption-guide/index.html
new file mode 100644
index 00000000..65ba1b03
--- /dev/null
+++ b/documentation/0.21.0/html/record-encryption-guide/index.html
@@ -0,0 +1,12 @@
+---
+layout: guide
+title: Record Encryption Guide
+description: Using the record encryption filter to provide encryption-at-rest for
+ Apache Kafka®.
+tags:
+ - security
+ - filter
+rank: '020'
+version: 0.21.0
+permalink: /documentation/0.21.0/html/record-encryption-guide/
+---
diff --git a/documentation/0.21.0/html/record-encryption-guide/toc.html b/documentation/0.21.0/html/record-encryption-guide/toc.html
new file mode 100644
index 00000000..dd331bfb
--- /dev/null
+++ b/documentation/0.21.0/html/record-encryption-guide/toc.html
@@ -0,0 +1,49 @@
+{% raw %}
+
+{% endraw %}
diff --git a/documentation/0.21.0/html/record-encryption-quick-start/content.html b/documentation/0.21.0/html/record-encryption-quick-start/content.html
new file mode 100644
index 00000000..a0efc769
--- /dev/null
+++ b/documentation/0.21.0/html/record-encryption-quick-start/content.html
@@ -0,0 +1,433 @@
+{% raw %}
+
+
+
+
+
+
+
Are you looking for an encryption-at-rest solution for data stored in your Apache Kafka?
+
+
+
This quick start guide will show you how to do that on Kubernetes … from scratch … without external dependencies. You’ll be encrypting in a snap!
+
+
+
+
+
1. Objectives
+
+
+
+
+
Deploy a Kafka Cluster to a Kubernetes cluster.
+
+
+
Deploy a Key Management Service (KMS) - we’ll show you how, with either HashiCorp Vault or AWS Localstack.
+
+
+
Deploy Kroxylicious and configure it to proxy the cluster and apply Record Encryption.
+
+
+
Produce/consume records to Kafka via the proxy demonstrating the transparent encryption.
+
+
+
Verify that the records are encrypted on the broker.
+
+
+
+
+
+
+
+
+
You really don’t want to use this quickstart to deploy an environment that will be used in production. The quickstart deliberately keeps things quick and simple… there’s no authentication, no TLS, no redundancy… Development purposes only - please! Refer to the documentation for Kroxylicious, Strimzi and your KMS for production best practices.
+
+
+
+
+
+
+
+
2. Overview
+
+
+
This diagram shows the important components that will be deployed by the quickstart. Arrows on the diagrams explain the important flows between them. The pods shown in yellow run the kafka console producer and kafka console consumers command line applications. These pods are used to demonstrate the record encryption in action.
+
+
+
The diagram omits the operators.
+
+
+
+
+
+
Figure 1. Important resources and the flows between them
Let’s get the Kubernetes Cluster up and running. Minikube defaults work just fine for this quickstart.
+
+
+
+
$ minikube start
+
+
+
+
+
+
5. Install the software
+
+
+
5.1. Install a KMS
+
+
Record Encryption needs somewhere to store its encryption keys, so you need to provide a KMS. Use Helm to install either HashiCorp Vault or AWS LocalStack.
+
+
+ AWS LocalStack
+
+
+
LocalStack is an AWS cloud service emulator that runs in a single container on your laptop or in your CI environment. It’s intended for developing and testing cloud & Serverless apps offline.
After installation, the LocalStack Helm chart prompts you to "Get the application URL". For this quickstart, you can ignore these steps.
+
+
+
+
+
+
+
+ HashiCorp Vault
+
+
+
HashiCorp Vault is available as a Cloud Service or standalone. In this guide, we install it standalone on Minikube. Note that Record Encryption requires the Transit Secrets Engine, which must be enabled.
+
+
+
+
$ helm repo add --force-update hashicorp https://helm.releases.hashicorp.com
+# Note: SKIP_SETCAP=true is a workaround for Vault 2.x in rootless containers (SETFCAP capability unavailable). Acceptable for development/testing.
+$ helm upgrade --install vault hashicorp/vault --namespace kms --create-namespace --version 0.32.0 --set server.dev.enabled=true,server.dev.devRootToken=myroottoken,server.extraEnvironmentVars.SKIP_SETCAP=true --wait
First, create a temporary directory for some working files. We use a directory called ko-install-nnnnn beneath /tmp, but you can use any name and location you like.
+
+
+
+
$ QUICKSTART_DIR=/tmp/ko-install-${RANDOM}
+$ mkdir -p ${QUICKSTART_DIR}
+$ cd ${QUICKSTART_DIR}
+
+
+
+
Now let’s download and unpack the Kroxylicious Operator
Now we need a Kafka Cluster. This is the Kafka Cluster that will be proxied. We’ll just use one from the Strimzi Quickstart. It will create a cluster in the kafka namespace.
We’ll need the bootstrap server address of the Kafka cluster later, so let’s assign a variable containing it now.
+
+
+
+
$ DIRECT_CLUSTER_BOOTSTRAP_SERVERS=$(kubectl get -n kafka kafka my-cluster -o=jsonpath='{.status.listeners[0].bootstrapServers}')
+
+
+
+
+
+
7. Deploy the Proxy with Record Encryption
+
+
+
Next, we’ll deploy a Kroxylicious proxy instance using the record encryption example included in the install zip you downloaded when you installed the operator
+
+
+
It will create an instance of Kroxylicious that will proxy the kafka cluster you created above. It will configure it to use the Record Encryption filter to encrypt as records are sent by producers and decrypt them as records get fetched by consumers. The proxy will be created in the my-proxy namespace.
+
+
+
+
$ kubectl apply -f examples/record-encryption/
+
+
+
+
You need to update the example to find keys in your chosen KMS.
Finally, let’s assign a variable containing the virtual cluster’s bootstrap address. We’ll use this later to produce and consume records through the proxy.
+
+
+
+
$ PROXIED_CLUSTER_BOOTSTRAP_SERVER=$(kubectl get -n my-proxy virtualkafkacluster my-cluster -o=jsonpath='{.status.ingresses[?(@.name=="cluster-ip")].bootstrapServer}')
+
+
+
+
+
+
8. Create an encryption key in the KMS
+
+
+
Record Encryption needs an encryption key to use when encrypting the records produced to the topic. Let’s create an encryption key in our KMS now.
+
+
+
The filter is configured to expect a key to exist in the KMS with the name KEK-<topic name>. We are going to be using a topic called trades so we will create a key that can be referred to using the name KEK-trades.
+
+
+ AWS LocalStack
+
+
+
With LocalStack, you need to create a key and an alias to that key.
Now let’s use Kafka’s console producer CLI to send a few records to a topic called trades. The Record Encryption filter will encrypt the records before they reach the broker, but this is completely transparent to the console producer.
+
+
+
+
+
+
+
You can safely ignore the warning about the UNKNOWN_TOPIC_OR_PARTITION, the topic will be created automatically.
Now let’s use Kafka’s console consumer to fetch the records. You’ll see the two records we sent above. The Record Encryption filter will decrypt the records before they reach the consumer, but this is completely transparent to the console consumer.
11. Verify that the records are encrypted on the broker
+
+
+
Consuming the same records we wrote is a bit underwhelming! And, in fact, it’s what we’d expect to see with a vanilla, unproxied, Kafka cluster. So how do we know the records are really encrypted on the broker? Let’s use the console consumer again, but this time we’ll consume straight from the Kafka cluster, bypassing the proxy. We’ll get back the records, but they’ll contain ciphertext, rather than the plaintext.
+{% endraw %}
diff --git a/documentation/0.21.0/html/record-validation-guide/content.html b/documentation/0.21.0/html/record-validation-guide/content.html
new file mode 100644
index 00000000..b98a8804
--- /dev/null
+++ b/documentation/0.21.0/html/record-validation-guide/content.html
@@ -0,0 +1,439 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers using the Kroxylicious Record Validation Filter to validate records sent by Kafka client to Kafka brokers. Refer to other Kroxylicious guides for information on running the proxy or for advanced topics such as plugin development.
+
+
+
The Record Validation filter validates records sent by a producer. Only records that pass the validation are sent to the broker. This filter can be used to prevent poison messages—such as those containing corrupted data or invalid formats—from entering the Kafka system, which may otherwise lead to consumer failure.
+
+
+
The filter currently supports three modes of operation:
+
+
+
+
+
Schema Validation ensures the content of the record conforms to a schema stored in an Apicurio Registry.
+
+
+
JSON Syntax Validation ensures the content of the record contains syntactically valid JSON.
+
+
+
JWS Signature Validation ensures the configured Kafka header contains a valid JSON Web Signature (JWS) Signature.
+
+
+
+
+
Validation rules can be applied to check the content of the Kafka record key or value.
+
+
+
If the validation fails, the produce request is rejected and the producing application receives an error response. The broker will not receive the rejected records.
+
+
+
+
+
+
+
This filter is currently in incubation and available as a preview. We would not recommend using it in a production environment.
+
+
+
+
+
+
+
+
1. (Preview) Setting up the Record Validation filter
+
+
+
This procedure describes how to set up the Record Validation filter. You provide the filter configuration and rules used to validate Kafka record keys and values.
apicurioId identifies the schema that the filter enforces. By default, this ID refers to the Apicurio Registry’s contentId. If the record key or value does not conform to the schema identified by this ID, the record is rejected. If a Kafka producer embeds a schema ID in the record (either in a header or magic bytes at the start of the record’s key or value), the filter validates it against this value. The record is rejected if the IDs do not match.
+
+
+
apicurioRegistryUrl specifies the endpoint URL for the Apicurio Registry.
+
+
+
schemaType specifies the type of schema to validate against. Supported values are:
+
+
+
+
JSON_SCHEMA — validates against a JSON Schema (default if not specified).
+
+
+
AVRO — validates against an Apache Avro schema.
+
+
+
PROTOBUF — validates against a Protocol Buffers schema.
+
+
+
+
+
+
tls specifies the optional TLS configuration used when connecting securely to the Apicurio Registry.
+
+
+
+
storeFile path to the truststore file that contains the trust anchors used to validate the server connection.
+
+
+
storePassword (Optional) password used to protect the truststore.
+
+
+
storeType truststore type. Supported values include PKCS12, JKS, and PEM.
+
+
+
+
+
+
wireFormatVersion controls whether the Apicurio client operates in V3 (default) or V2 (deprecated). In V3 mode, the apicurioId property identifies schema by content ID, and the filter expects the Kafka producer to use content IDs to identify schema. In V2 mode, the apicurioId property identifies schema by global ID, and the filter expects the Kafka producer to use global IDs to identify schema. V2 mode exists only for compatibility with older Apicurio configurations and is deprecated. For new applications, use V3.
+
+
+
allowNulls specifies whether the validator allows record keys or values to be null. The default is false.
+
+
+
allowEmpty specifies whether the validator allows record keys or values to be empty. The default is false.
+
+
+
+
+
For more information about embedding the ID in a record, see the Apicurio documentation.
jwsSignatureValidation.trustedJsonWebKeySet specifies the set of JSON Web Keys (JWK) used to verify JSON Web Signature (JWS) signatures.
+
+
+
The JWS Signature validation rule verifies the JWS only and does not perform JSON Web Token (JWT) claim validation. Token claims such as expiration and issuer are not checked.
+
+
+
jwsSignatureValidation.algorithms.allowed and jwsSignatureValidation.algorithms.denied specify which signature algorithms are permitted.
+
+
+
+
If both allowed and denied are omitted or empty, all algorithms are blocked and JWS Signature verification fails.
+
+
+
If only allowed is specified, only the algorithms listed in allowed are permitted.
+
+
+
If only denied is specified, all algorithms except those listed in denied are permitted.
+
+
+
If both allowed and denied are specified, only the algorithms listed in allowed are permitted.
+
+
+
+
+
+
jwsSignatureValidation.recordHeader.key specifies the Kafka record header key that contains the JWS. The default value is kroxylicious.io/jws.
+
+
+
jwsSignatureValidation.recordHeader.required specifies whether the JWS record header must be present. If set to false, validation is skipped when the header is missing.
+
+
+
jwsSignatureValidation.content.detached specifies whether the record buffer is used as the JWS payload (detached content, as defined in RFC 7515 Appendix F).
+
+
+
+
+
+
+
+
+
+
+
+
+
1.1. Example proxy configuration file
+
+
If your instance of the Kroxylicious Proxy runs directly on an operating system, provide the filter configuration in the filterDefinitions list of your proxy configuration. Here’s a complete example of a filterDefinitions entry configured for record validation:
+{% endraw %}
diff --git a/documentation/0.21.0/html/sasl-inspection-guide/content.html b/documentation/0.21.0/html/sasl-inspection-guide/content.html
new file mode 100644
index 00000000..73226f01
--- /dev/null
+++ b/documentation/0.21.0/html/sasl-inspection-guide/content.html
@@ -0,0 +1,406 @@
+{% raw %}
+
+
+
+
+
+
About this guide
+
+
This guide covers using the Kroxylicious SASL Inspection Filter. This filter extracts the authenticated principal from a successful SASL exchange between Kafka Client and Kafka Broker and makes it available to the other filters in the chain.
+
+
+
Refer to other Kroxylicious guides for information on running the proxy or for advanced topics such as plugin development.
+
+
+
This filter inspects the SASL exchange between Kafka Client and Broker and extracts the authorization ID. If the client’s authentication with the broker is successful, the filter uses the SASL subject builder to construct an authenticated subject. The subject is made available to the other filters in the chain, so that they may know on whose behalf they are acting.
+
+
+
+
+
+
+
The SASL Inspection Filter plays no part in deciding if the authentication is successful or not. That role remains the exclusive responsibility of the broker.
+
+
+
+
+
+
To use this filter, the Kafka Cluster’s listener must be configured to authenticate using SASL, and it must use a SASL mechanism that is enabled by this filter. If the Kafka Client is configured to use a SASL mechanism that is not supported by the proxy, or the proxy and Kafka Cluster do not have the same mechanism available, the client will be disconnected with an unsupported SASL mechanism error.
+
+
+
This filter supports the following SASL mechanisms:
Mechanisms that transmit credentials in plain text are disabled by default. This is done to avoid the plain-text passwords existing in the proxy’s memory. To use such a mechanism, you must enable it in the filter’s configuration.
+
+
+
For the OAUTHBEARER inspection, only JWT tokens that use signatures (JWS) are supported. JWT tokens that use encryption (JWE) are not supported. Unsigned JWT tokens are supported but not recommended for production use.
+
+
+
If an attempt is made to use an unsupported token type, the authentication will fail with a SASL error.
+
+
+
+
+
+
Figure 1. Sequence diagram showing the SASL inspection filter extracting an authenticated principal from a SASL negotiation.
+
+
+
+
+
1. Configuring the SASL inspection filter
+
+
+
This procedure describes how to set up the SASL Inspection filter by configuring it in Kroxylicious.
If your instance of the Kroxylicious Proxy runs directly on an operating system, provide the filter configuration in the filterDefinitions list of your proxy configuration.
+
+
+
Here’s a complete example of a filterDefinitions entry configured for SASL inspection:
enabledMechanisms restricts the filter to the given SASL mechanism(s). Refer to a SASL mechanism by its name given in the supported mechanisms table. If omitted, the SASL mechanisms SCRAM-SHA-256, SCRAM-SHA-512 and OAUTHBEARER will be enabled by default.
+
+
+
If requireAuthentication is true, a successful authentication is required before the filter forwards any requests other than those strictly required to perform SASL authentication. If false then the filter will forward all requests regardless of whether SASL authentication has been attempted or was successful. Defaults to false.
+
+
+
subjectBuilder is the name of a plugin class implementing io.kroxylicious.proxy.authentication.SaslSubjectBuilderService. If omitted, a default subject builder creates a principal with name matching the SASL authorized ID.
enabledMechanisms restricts the filter to the given SASL mechanism(s). Refer to a SASL mechanism by its name given in the supported mechanisms table. If omitted, the SASL mechanisms SCRAM-SHA-256, SCRAM-SHA-512 and OAUTHBEARER will be enabled by default.
+
+
+
If requireAuthentication is true, a successful authentication is required before the filter forwards any requests other than those strictly required to perform SASL authentication. If false then the filter will forward all requests regardless of whether SASL authentication has been attempted or was successful. Defaults to false.
+
+
+
subjectBuilder is the name of a plugin class implementing io.kroxylicious.proxy.authentication.SaslSubjectBuilderService. If omitted, a default subject builder creates a principal with name matching the SASL authorized ID.
By default, the SASL Inspection filter creates an authenticated subject with a principal that contains the SASL authorized ID. You can change this behavior by providing an alternative subject builder configuration.
+
+
+
2.1. Configuration
+
+
The following example shows configuration for the SASL subject builder:
<field name> identifies the field to extract. Valid field names are listed in the Supported fields table.
+
+
+
<match replace flags 1>…<match replace flags n> define a set of replacement matchers used to match and transform the field value.
+
+
+
else defines the behavior when none of the replacement matchers match the field value. identity generates a principal based on the original (unmapped) field value. anonymous generates an anonymous principal.
+
+
+
<principal factory> the name of a PrincipalFactory implementation. Currently, only UserFactory is supported.
+
+
+
+
+
Field values are tested against the replacement matchers in the order they are configured. The first matcher that matches the field value generates the principal. After a successful match, remaining matchers for that field are ignored.
+
+
+
If no matcher matches, the outcome is defined by the else clause. If there is no match and no else clause is provided, no principal is generated for this field.
+
+
+
+
+
+
+
Currently, the subject builder is restricted to producing a subject containing at most one user principal.
+
+
+
+
+
+
+
2.2. Supported fields
+
+
+
+
+
+
+
+
Field name
+
Description
+
+
+
+
+
+
saslAuthorizedId
+
+
+
The SASL authorized ID returned by the SASL authentication exchange.
+
+
+
+
+
+
+
2.3. Replacement matchers
+
+
Replacement matchers control how field values are matched and transformed. A replacement matcher is defined using this format:
<matching regular expression> is the regular expression used to test the field value.
+
+
+
<replacement> defines the replacement value to use when the regular expression is matched. The replacement can use back references ($1 etc) to refer to capturing groups defined within the regular expression.
+
+
+
<flags> (optional) can be either L or U for lower or upper-casing respectively. The case transformation is applied to the replacement value after any back reference resolution.
+
+
+
+
+
+
+
+
3. Trademark notice
+
+
+
+
+
Apache Kafka is a registered trademark of The Apache Software Foundation.
+
+
+
Kubernetes is a registered trademark of The Linux Foundation.
+
+
+
Prometheus is a registered trademark of The Linux Foundation.
+
+
+
Strimzi is a trademark of The Linux Foundation.
+
+
+
HashiCorp Vault is a registered trademark of HashiCorp, Inc.
+
+
+
AWS Key Management Service is a trademark of Amazon.com, Inc. or its affiliates.
+
+
+
Microsoft, Azure, and Microsoft Entra are trademarks of the Microsoft group of companies.
+
+
+
Fortanix and Data Security Manager are trademarks of Fortanix, Inc.
+
+
+
+
+
+
+
4. Glossary
+
+
+
Glossary of terms used in the SASL Inspection guide.
+
+
+
+
JWE
+
+
JSON Web Encryption is an IETF standard for exchanging encrypted data using JSON.
+
+
JWT
+
+
JSON Web Token is an IETF standard for securely transmitting information between parties as a JSON object.
Annotates a @Plugin
+ implementation class whose
+ fully-qualified type name has been changed
+ and whose old name should no longer be used
+ to refer to it.
Abstracts Key Management System (KMS) operations necessary for the envelope encryption pattern using Data Encryption Keys (DEKs) and Key Encryption Keys (KEKs).
+Starting from the Overview page, you can browse the documentation using the links in each page, and in the navigation bar at the top of each page. The Index and Search box allow you to navigate to specific declarations and summary pages, including: All Packages, All Classes and Interfaces
+
+
Search
+
You can search for definitions of modules, packages, types, fields, methods, system properties and other terms defined in the API. These items can be searched using part or all of the name, optionally using "camelCase" abbreviations, or multiple search terms separated by whitespace. Some examples:
+The following sections describe the different kinds of pages in this collection.
+
+
Overview
+
The Overview page is the front page of this API document and provides a list of all packages with a summary for each. This page can also contain an overall description of the set of packages.
+
+
+
Package
+
Each package has a page that contains a list of its classes and interfaces, with a summary for each. These pages may contain the following categories:
+
+
Interfaces
+
Classes
+
Enum Classes
+
Exception Classes
+
Annotation Interfaces
+
+
+
+
Class or Interface
+
Each class, interface, nested class and nested interface has its own separate page. Each of these pages has three sections consisting of a declaration and description, member summary tables, and detailed member descriptions. Entries in each of these sections are omitted if they are empty or not applicable.
+
+
Class Inheritance Diagram
+
Direct Subclasses
+
All Known Subinterfaces
+
All Known Implementing Classes
+
Class or Interface Declaration
+
Class or Interface Description
+
+
+
+
Nested Class Summary
+
Enum Constant Summary
+
Field Summary
+
Property Summary
+
Constructor Summary
+
Method Summary
+
Required Element Summary
+
Optional Element Summary
+
+
+
+
Enum Constant Details
+
Field Details
+
Property Details
+
Constructor Details
+
Method Details
+
Element Details
+
+
Note: Annotation interfaces have required and optional elements, but not methods. Only enum classes have enum constants. The components of a record class are displayed as part of the declaration of the record class. Properties are a feature of JavaFX.
+
The summary entries are alphabetical, while the detailed descriptions are in the order they appear in the source code. This preserves the logical groupings established by the programmer.
+
+
+
Other Files
+
Packages and modules may contain pages with additional information related to the declarations nearby.
+
+
+
Use
+
Each documented package, class and interface has its own Use page. This page describes what packages, classes, methods, constructors and fields use any part of the given class or package. Given a class or interface A, its Use page includes subclasses of A, fields declared as A, methods that return A, and methods and constructors with parameters of type A. You can access this page by first going to the package, class or interface, then clicking on the USE link in the navigation bar.
+
+
+
Tree (Class Hierarchy)
+
There is a Class Hierarchy page for all packages, plus a hierarchy for each package. Each hierarchy page contains a list of classes and a list of interfaces. Classes are organized by inheritance structure starting with java.lang.Object. Interfaces do not inherit from java.lang.Object.
+
+
When viewing the Overview page, clicking on TREE displays the hierarchy for all packages.
+
When viewing a particular package, class or interface page, clicking on TREE displays the hierarchy for only that package.
+
+
+
+
Deprecated API
+
The Deprecated API page lists all of the API that have been deprecated. A deprecated API is not recommended for use, generally due to shortcomings, and a replacement API is usually given. Deprecated APIs may be removed in future implementations.
Each serializable or externalizable class has a description of its serialization fields and methods. This information is of interest to those who implement rather than use the API. While there is no link in the navigation bar, you can get to this information by going to any serialized class and clicking "Serialized Form" in the "See Also" section of the class description.
+
+
+
All Packages
+
The All Packages page contains an alphabetic index of all packages contained in the documentation.
+
+
+
All Classes and Interfaces
+
The All Classes and Interfaces page contains an alphabetic index of all classes and interfaces contained in the documentation, including annotation interfaces, enum classes, and record classes.
+
+
+
Index
+
The Index contains an alphabetic index of all classes, interfaces, constructors, methods, and fields in the documentation, as well as summary pages such as All Packages, All Classes and Interfaces.
+
+
+
+This help file applies to API documentation generated by the standard doclet.
+
+
Allows a filter (typically one which implements SaslAuthenticateRequestFilter)
+ to announce a successful authentication outcome with the Kafka client to other plugins.
Returns the decision about whether the given AuthorizeResult.subject() is allowed to perform the given
+ operation on the resource with the given resourceName.
Annotates a @Plugin
+ implementation class whose
+ fully-qualified type name has been changed
+ and whose old name should no longer be used
+ to refer to it.
Asynchronously generates a new Data Encryption Key (DEK) and returns it together with the same DEK wrapped by the Key Encryption Key (KEK) given
+ by the kekRef.
+ The returned encrypted DEK can later be decrypted with Kms.decryptEdek(Object).
Abstracts Key Management System (KMS) operations necessary for the envelope encryption pattern using Data Encryption Keys (DEKs) and Key Encryption Keys (KEKs).
Partitions the given items, whose names can be obtained via the given toName function, into two lists
+ based on whether the AuthorizeResult.subject() is allowed to perform the given operation on them.
Abstracts Key Management System (KMS) operations necessary for the envelope encryption pattern using Data Encryption Keys (DEKs) and Key Encryption Keys (KEKs).
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
Partitions the given items, whose names can be obtained via the given toName function, into two lists
+ based on whether the subject() is allowed to perform the given operation on them.
Partitions the given items, whose names can be obtained via the given toName function, into two lists
+ based on whether the subject() is allowed to perform the given operation on them.
+
+
Type Parameters:
+
T - The type of item.
+
Parameters:
+
items - The items to partition
+
operation - The operation
+
toName - A function that returns the name of each item.
+
Returns:
+
A pair of lists of the items which the subject is allowed to, or denied from, performing the operation on.
+ It is guaranteed that there is always an entry for both ALLOW and DENY in the returned map.
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
Abstracts making an allow/deny decision about some Subject performing some Action on a resource.
+ In other words, this is an access control policy decision point.
+
+
Authorizer is a flexible abstraction, while it assumes that resources have names, it
+ doesn't prescribe any specific kinds of resource or operations. Instead, resource kinds and the operations they support
+ are represented as subclasses of ResourceType.
Determines whether the given subject is allowed to perform the given actions.
+ The implementation must ensure that the returned authorization partitions all the given actions
+ between AuthorizeResult.allowed() and AuthorizeResult.denied().
+
+
Parameters:
+
subject - The subject.
+
actions - The actions.
+
Returns:
+
The outcome. The returned stage should fail with an AuthorizerException if the authorizer was not able to
+ make a decision.
Returns the types of resource that this authorizer is able to make decisions about.
+ If this is not known to the implementation it should return an empty Optional.
+
+
This is provided so that an access control policy enforcement point can confirm that it
+ is capable of providing access control to all the resource types in the access control policy
+ backing this authorizer.
+
+
Returns:
+
the types of resource that this authorizer is able to make decisions about.
Closes the service. Once the service is closed, the building of new instances or the
+ continued use of previously built instances is not allowed. The effect of using an instance
+ after the service that created it is closed is undefined.
+
+ Implementations of this method must be idempotent.
+
+ Close implementations must tolerate the closing of service that has not been initialized or
+ one for which initialization did not fully complete without further exception.
Returns the enum constant of this class with the specified name.
+The string must match exactly an identifier used to declare an
+enum constant in this class. (Extraneous whitespace characters are
+not permitted.)
+
+
Parameters:
+
name - the name of the enum constant to be returned.
public interface ResourceType<S extends Enum<S> & ResourceType<S>>
+
A ResourceType is an enum of the possible operations on a resource of a particular type.
+ We use a one-enum-per-resource-type pattern so that the Class of an implementation also
+ serves to identify the resource type.
+ For this reason, implementations of this interface should be named for the type of resource
+ (for example Topic, or ConsumerGroup) rather than the operations
+ enumerated (so not TopicOperations or ConsumerGroupOperations).
+ The elements of the enumeration are typically verbs, such as READ,
+ WRITE, DELETE, CREATE, and so on.
Returns a set of operations that are implied by this operation.
+ This must return the complete transitive closure of all such implied operations.
+ In other words, if logically speaking A implies B, and B implies C then
+ programmatically speaking A.implies() must contain both B and C.
+
+
Returns:
+
The operations that are implied by this operation.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/documentation/0.21.0/javadoc/io/kroxylicious/authorizer/service/class-use/Action.html b/documentation/0.21.0/javadoc/io/kroxylicious/authorizer/service/class-use/Action.html
new file mode 100644
index 00000000..735e45ee
--- /dev/null
+++ b/documentation/0.21.0/javadoc/io/kroxylicious/authorizer/service/class-use/Action.html
@@ -0,0 +1,122 @@
+
+
+
+
+Uses of Record Class io.kroxylicious.authorizer.service.Action (Kroxylicious API Javadoc 0.21.0 API)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Uses of Record Class io.kroxylicious.authorizer.service.Action
Returns the decision about whether the given AuthorizeResult.subject() is allowed to perform the given
+ operation on the resource with the given resourceName.
Partitions the given items, whose names can be obtained via the given toName function, into two lists
+ based on whether the AuthorizeResult.subject() is allowed to perform the given operation on them.
Returns the decision about whether the given AuthorizeResult.subject() is allowed to perform the given
+ operation on the resource with the given resourceName.
Partitions the given items, whose names can be obtained via the given toName function, into two lists
+ based on whether the AuthorizeResult.subject() is allowed to perform the given operation on them.
A Data Encryption Key as both plaintext and encrypted.
+
+
Note: It is strongly recommended that the implementation of SecretKey used for
+ dek overrides Destroyable.destroy() to actually destroy the key material.
+ DestroyableRawSecretKey provides such an implementation for use by implementers.
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
public class InvalidKeyUsageException
+extends KmsException
+
Thrown when a KMS-managed key is passed to an operation that is incompatible with its allowed key usage.
+ E.g. when a key intended to be used for signing/verifying is passed to a key wrapping operation.
Asynchronously generates a new Data Encryption Key (DEK) and returns it together with the same DEK wrapped by the Key Encryption Key (KEK) given
+ by the kekRef.
+ The returned encrypted DEK can later be decrypted with decryptEdek(Object).
Asynchronously generates a new Data Encryption Key (DEK) and returns it together with the same DEK wrapped by the Key Encryption Key (KEK) given
+ by the kekRef.
+ The returned encrypted DEK can later be decrypted with decryptEdek(Object). It is expected that
+ the returned EDEK contains everything required for decryption including an immutable reference to the KEK
+
+
Parameters:
+
kekRef - The key encryption key used to encrypt the generated data encryption key.
+
Returns:
+
A completion stage for the wrapped data encryption key.
Note: It is strongly recommended that the implementation of SecretKey returned in the CompletionStage
+ overrides Destroyable.destroy() to actually destroy the key material.
+ DestroyableRawSecretKey provides such an implementation for use by implementers.
+
+
Parameters:
+
edek - The encrypted data encryption key.
+
Returns:
+
A completion stage for the data encryption key
+
Throws:
+
UnknownKeyException - If the edek was not encrypted by a KEK known to this KMS.
Builds a KMS service.
+ initialize(C) must have been called before this method is invoked.
+
+
Returns:
+
the KMS.
+
Throws:
+
IllegalStateException - if the KMS Service has not been initialised or the KMS service is closed.
+
+
+
+
+
+
close
+
defaultvoidclose()
+
Closes the service. Once the service is closed, the building of new Kms instances or the
+ continued use of previously built Kms instances is not allowed. The effect of using a Kms
+ after the service that created it is closed is undefined.
+
+ Implementations of this method must be idempotent.
+
+ Close implementations must tolerate the closing of service that has not been initialized or
+ one for which initialization did not fully complete without further exception.
BufferOverflowException - If this buffer's current position is not smaller than its limit.
+ This should never be the case if the buffer has at least sizeOf(object) bytes remaining.
Abstracts Key Management System (KMS) operations necessary for the envelope encryption pattern using Data Encryption Keys (DEKs) and Key Encryption Keys (KEKs).
Asynchronously generates a new Data Encryption Key (DEK) and returns it together with the same DEK wrapped by the Key Encryption Key (KEK) given
+ by the kekRef.
+ The returned encrypted DEK can later be decrypted with Kms.decryptEdek(Object).
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/documentation/0.21.0/javadoc/io/kroxylicious/kms/service/class-use/DestroyableRawSecretKey.html b/documentation/0.21.0/javadoc/io/kroxylicious/kms/service/class-use/DestroyableRawSecretKey.html
new file mode 100644
index 00000000..904d81e4
--- /dev/null
+++ b/documentation/0.21.0/javadoc/io/kroxylicious/kms/service/class-use/DestroyableRawSecretKey.html
@@ -0,0 +1,103 @@
+
+
+
+
+Uses of Class io.kroxylicious.kms.service.DestroyableRawSecretKey (Kroxylicious API Javadoc 0.21.0 API)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Uses of Class io.kroxylicious.kms.service.DestroyableRawSecretKey
Abstracts Key Management System (KMS) operations necessary for the envelope encryption pattern using Data Encryption Keys (DEKs) and Key Encryption Keys (KEKs).
Abstracts Key Management System (KMS) operations necessary for the envelope encryption pattern using Data Encryption Keys (DEKs) and Key Encryption Keys (KEKs).
Abstracts Key Management System (KMS) operations necessary for the envelope encryption pattern using Data Encryption Keys (DEKs) and Key Encryption Keys (KEKs).
Abstracts Key Management System (KMS) operations necessary for the envelope encryption pattern using Data Encryption Keys (DEKs) and Key Encryption Keys (KEKs).
Abstracts Key Management System (KMS) operations necessary for the envelope encryption pattern using Data Encryption Keys (DEKs) and Key Encryption Keys (KEKs).
+
The Kms interface provides methods for generating DEKs,
+ encrypting them with KEKs, and later decrypting encrypted DEKs (edeks).
Abstracts Key Management System (KMS) operations necessary for the envelope encryption pattern using Data Encryption Keys (DEKs) and Key Encryption Keys (KEKs).
Exposes SASL authentication information to plugins, for example using FilterContext.clientSaslContext().
+ This is implemented by the runtime for use by plugins.
Implementations must override hashCode() and equals(Object) such that
+ instances are equal if, and only if, they have the same implementation class and their names are the same
+ (according to equals()). One easy way to achieve this is to use a record class with a single name component.
A SASL-authenticating Filter
+ may use a SaslSubjectBuilder in order to construct the
+ Subject with which it calls
+ FilterContext.clientSaslAuthenticationSuccess(String, Subject).
+ As such, SaslSubjectBuilder is an opt-in way of decoupling the building of Subjects
+ from the mechanism of SASL authentication.
+ SASL-authenticating filters are not obliged to use this abstraction.
+
+
TransportSubjectBuilder is a similar interface used for building a
+ Subject based on transport-layer information.
+ However, note that a SaslSubjectBuilder is not specified directly
+ on a virtual cluster as a TransportSubjectBuilder is.
Represents an actor in the system.
+ Subjects are composed of a possibly-empty set of identifiers represented as Principal instances.
+ An anonymous actor is represented by a Subject with an empty set of principals.
+ As a convenience, anonymous() returns such a subject.
+
+
+
The principals included in a subject might comprise the following:
+
+
information proven by a client, such as a SASL authorized id,
+
information known about the client, such as the remote peer's IP address,
+
information obtained about the client from a trusted source, such as looking up role or group information from a directory.
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
A principal identifying an authenticated client.
+ It is currently required to use this principal to represent clients that have authenticated via
+ TLS or SASL.
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
Allows a filter (typically one which implements SaslAuthenticateRequestFilter)
+ to announce a successful authentication outcome with the Kafka client to other plugins.
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/documentation/0.21.0/javadoc/io/kroxylicious/proxy/authentication/class-use/SubjectBuildingException.html b/documentation/0.21.0/javadoc/io/kroxylicious/proxy/authentication/class-use/SubjectBuildingException.html
new file mode 100644
index 00000000..42983307
--- /dev/null
+++ b/documentation/0.21.0/javadoc/io/kroxylicious/proxy/authentication/class-use/SubjectBuildingException.html
@@ -0,0 +1,62 @@
+
+
+
+
+Uses of Class io.kroxylicious.proxy.authentication.SubjectBuildingException (Kroxylicious API Javadoc 0.21.0 API)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Uses of Class io.kroxylicious.proxy.authentication.SubjectBuildingException
+
+No usage of io.kroxylicious.proxy.authentication.SubjectBuildingException
+
+
A reference to the file containing a nonempty plain text password in UTF-8 encoding. If the password file
+ contains more than one line, only the characters of the first line are taken to be the password,
+ excluding the line ending. Subsequent lines are ignored.
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
Abstraction for providing passwords to proxy configuration.
+
PasswordProvider implementations allow
+ passwords to be sourced from different locations (inline in configuration, external files).
+ This enables separation of sensitive data from configuration files.
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with '=='.
A KeyProvider backed by a private-key/certificate pair expressed in PEM format.
+
+ Note that support for PKCS-8 private keys is derived from the JDK. PKCS-1 private keys are only supported if Bouncy Castle
+ is available on the classpath.
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
storeFile - location of a key store, or reference to a PEM file containing both private-key and certificate/intermediates.
+
storePasswordProvider - provider for the store password or null if store does not require a password.
+
keyPasswordProvider - provider for the key password. if null the password obtained from the storePasswordProvider will be used to decrypt the key.
+
storeType - specifies the server key type. Legal values are those types supported by the platform KeyStore,
+ and PEM (for X.509 certificates expressed in PEM format).
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
Returns the enum constant of this class with the specified name.
+The string must match exactly an identifier used to declare an
+enum constant in this class. (Extraneous whitespace characters are
+not permitted.)
+
+
Parameters:
+
name - the name of the enum constant to be returned.
public record TlsCredentialSupplierConfig(String type, Object config)
+extends Record
+
Configuration for a TLS credential supplier that dynamically provides TLS credentials.
+
+ This follows the same pattern as filter configuration, with a type specifying the
+ ServerTlsCredentialSupplierFactory implementation and an optional config object
+ for supplier-specific configuration.
+
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
A TrustProvider is a source of trust anchors used to determine whether a certificate presented by a peer is trusted.
+
+
In the TLS client role, it is used to validate that the server's certificate is trusted. If the
+ trust provider is omitted platform trust is used instead.
+
In the TLS server role, when the TLS client authentication is in use, it is used by the server to
+ ensure that the client's certificate is known.
storeFile - location of a key store, or reference to a PEM file containing both private-key/certificate/intermediates.
+
storePasswordProvider - provider for the store password or null if store does not require a password.
+
storeType - specifies the server key type. Legal values are those types supported by the platform KeyStore,
+ and PEM (for X.509 certificates expressed in PEM format).
+
trustOptions - the trust options that will be applied to this peer.
storeFile - location of a key store, or reference to a PEM file containing both private-key/certificate/intermediates.
+
storePasswordProvider - provider for the store password or null if store does not require a password.
+
storeType - specifies the server key type. Legal values are those types supported by the platform KeyStore,
+ and PEM (for X.509 certificates expressed in PEM format).
Returns a string representation of this record class. The representation contains the name of the class, followed by the name and value of each of the record components.
Indicates whether some other object is "equal to" this one. The objects are equal if the other object is of the same class and if all the record components are equal. All components in this record class are compared with Objects::equals(Object,Object).
TLS configuration for proxy connections to clients and upstream clusters.
+
These configuration classes define keystores, trust stores, and TLS options
+ for both server-side (client connections) and client-side (upstream cluster connections) TLS.
Determine if a request message of type AddOffsetsToTxnRequest should be handled by
+ this filter implementation.
+ If this method returns true then onAddOffsetsToTxnRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AddOffsetsToTxnResponse should be handled by
+ this filter implementation.
+ If this method returns true then onAddOffsetsToTxnResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AddPartitionsToTxnRequest should be handled by
+ this filter implementation.
+ If this method returns true then onAddPartitionsToTxnRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AddPartitionsToTxnResponse should be handled by
+ this filter implementation.
+ If this method returns true then onAddPartitionsToTxnResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AddRaftVoterRequest should be handled by
+ this filter implementation.
+ returns true then onAddRaftVoterRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AddRaftVoterResponse should be handled by
+ this filter implementation.
+ returns true then onAddRaftVoterResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AllocateProducerIdsRequest should be handled by
+ this filter implementation.
+ returns true then onAllocateProducerIdsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AllocateProducerIdsResponse should be handled by
+ this filter implementation.
+ returns true then onAllocateProducerIdsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AlterClientQuotasRequest should be handled by
+ this filter implementation.
+ returns true then onAlterClientQuotasRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AlterClientQuotasResponse should be handled by
+ this filter implementation.
+ returns true then onAlterClientQuotasResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AlterConfigsRequest should be handled by
+ this filter implementation.
+ returns true then onAlterConfigsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AlterConfigsResponse should be handled by
+ this filter implementation.
+ returns true then onAlterConfigsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AlterPartitionReassignmentsRequest should be handled by
+ this filter implementation.
+ returns true then onAlterPartitionReassignmentsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AlterPartitionReassignmentsResponse should be handled by
+ this filter implementation.
+ returns true then onAlterPartitionReassignmentsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AlterPartitionRequest should be handled by
+ this filter implementation.
+ returns true then onAlterPartitionRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AlterPartitionResponse should be handled by
+ this filter implementation.
+ returns true then onAlterPartitionResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AlterReplicaLogDirsRequest should be handled by
+ this filter implementation.
+ returns true then onAlterReplicaLogDirsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AlterReplicaLogDirsResponse should be handled by
+ this filter implementation.
+ returns true then onAlterReplicaLogDirsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AlterShareGroupOffsetsRequest should be handled by
+ this filter implementation.
+ returns true then onAlterShareGroupOffsetsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AlterShareGroupOffsetsResponse should be handled by
+ this filter implementation.
+ returns true then onAlterShareGroupOffsetsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AlterUserScramCredentialsRequest should be handled by
+ this filter implementation.
+ returns true then onAlterUserScramCredentialsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AlterUserScramCredentialsResponse should be handled by
+ this filter implementation.
+ returns true then onAlterUserScramCredentialsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ApiVersionsRequest should be handled by
+ this filter implementation.
+ returns true then onApiVersionsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ApiVersionsResponse should be handled by
+ this filter implementation.
+ returns true then onApiVersionsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type AssignReplicasToDirsRequest should be handled by
+ this filter implementation.
+ returns true then onAssignReplicasToDirsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type AssignReplicasToDirsResponse should be handled by
+ this filter implementation.
+ returns true then onAssignReplicasToDirsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type BeginQuorumEpochRequest should be handled by
+ this filter implementation.
+ returns true then onBeginQuorumEpochRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type BeginQuorumEpochResponse should be handled by
+ this filter implementation.
+ returns true then onBeginQuorumEpochResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type BrokerHeartbeatRequest should be handled by
+ this filter implementation.
+ returns true then onBrokerHeartbeatRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type BrokerHeartbeatResponse should be handled by
+ this filter implementation.
+ returns true then onBrokerHeartbeatResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type BrokerRegistrationRequest should be handled by
+ this filter implementation.
+ returns true then onBrokerRegistrationRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type BrokerRegistrationResponse should be handled by
+ this filter implementation.
+ returns true then onBrokerRegistrationResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ConsumerGroupDescribeRequest should be handled by
+ this filter implementation.
+ returns true then onConsumerGroupDescribeRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ConsumerGroupDescribeResponse should be handled by
+ this filter implementation.
+ returns true then onConsumerGroupDescribeResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ConsumerGroupHeartbeatRequest should be handled by
+ this filter implementation.
+ returns true then onConsumerGroupHeartbeatRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ConsumerGroupHeartbeatResponse should be handled by
+ this filter implementation.
+ returns true then onConsumerGroupHeartbeatResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ControllerRegistrationRequest should be handled by
+ this filter implementation.
+ returns true then onControllerRegistrationRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ControllerRegistrationResponse should be handled by
+ this filter implementation.
+ If this method returns true then onControllerRegistrationResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type CreateAclsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onCreateAclsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type CreateAclsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onCreateAclsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type CreateDelegationTokenRequest should be handled by
+ this filter implementation.
+ If this method returns true then onCreateDelegationTokenRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type CreateDelegationTokenResponse should be handled by
+ this filter implementation.
+ If this method returns true then onCreateDelegationTokenResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type CreatePartitionsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onCreatePartitionsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type CreatePartitionsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onCreatePartitionsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type CreateTopicsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onCreateTopicsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type CreateTopicsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onCreateTopicsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DeleteAclsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteAclsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DeleteAclsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteAclsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DeleteGroupsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteGroupsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DeleteGroupsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteGroupsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DeleteRecordsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteRecordsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DeleteRecordsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteRecordsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DeleteShareGroupOffsetsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteShareGroupOffsetsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DeleteShareGroupOffsetsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteShareGroupOffsetsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DeleteShareGroupStateRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteShareGroupStateRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DeleteShareGroupStateResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteShareGroupStateResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DeleteTopicsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteTopicsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DeleteTopicsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDeleteTopicsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeAclsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeAclsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeAclsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeAclsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeClientQuotasRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeClientQuotasRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeClientQuotasResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeClientQuotasResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeClusterRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeClusterRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeClusterResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeClusterResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeConfigsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeConfigsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeConfigsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeConfigsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeDelegationTokenRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeDelegationTokenRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeDelegationTokenResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeDelegationTokenResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeGroupsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeGroupsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeGroupsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeGroupsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeLogDirsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeLogDirsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeLogDirsResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeLogDirsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeProducersRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeProducersRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeProducersResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeProducersResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeQuorumRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeQuorumRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeQuorumResponse should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeQuorumResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeShareGroupOffsetsRequest should be handled by
+ this filter implementation.
+ If this method returns true then onDescribeShareGroupOffsetsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeShareGroupOffsetsResponse should be handled by
+ this filter implementation.
+ returns true then onDescribeShareGroupOffsetsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeTopicPartitionsRequest should be handled by
+ this filter implementation.
+ returns true then onDescribeTopicPartitionsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeTopicPartitionsResponse should be handled by
+ this filter implementation.
+ returns true then onDescribeTopicPartitionsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeTransactionsRequest should be handled by
+ this filter implementation.
+ returns true then onDescribeTransactionsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeTransactionsResponse should be handled by
+ this filter implementation.
+ returns true then onDescribeTransactionsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type DescribeUserScramCredentialsRequest should be handled by
+ this filter implementation.
+ returns true then onDescribeUserScramCredentialsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type DescribeUserScramCredentialsResponse should be handled by
+ this filter implementation.
+ returns true then onDescribeUserScramCredentialsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ElectLeadersRequest should be handled by
+ this filter implementation.
+ returns true then onElectLeadersRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ElectLeadersResponse should be handled by
+ this filter implementation.
+ returns true then onElectLeadersResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type EndQuorumEpochRequest should be handled by
+ this filter implementation.
+ returns true then onEndQuorumEpochRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type EndQuorumEpochResponse should be handled by
+ this filter implementation.
+ returns true then onEndQuorumEpochResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type EndTxnRequest should be handled by
+ this filter implementation.
+ returns true then onEndTxnRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type EndTxnResponse should be handled by
+ this filter implementation.
+ returns true then onEndTxnResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type EnvelopeRequest should be handled by
+ this filter implementation.
+ returns true then onEnvelopeRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type EnvelopeResponse should be handled by
+ this filter implementation.
+ returns true then onEnvelopeResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ExpireDelegationTokenRequest should be handled by
+ this filter implementation.
+ returns true then onExpireDelegationTokenRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ExpireDelegationTokenResponse should be handled by
+ this filter implementation.
+ returns true then onExpireDelegationTokenResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type FetchRequest should be handled by
+ this filter implementation.
+ returns true then onFetchRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type FetchResponse should be handled by
+ this filter implementation.
+ returns true then onFetchResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type FetchSnapshotRequest should be handled by
+ this filter implementation.
+ returns true then onFetchSnapshotRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type FetchSnapshotResponse should be handled by
+ this filter implementation.
+ returns true then onFetchSnapshotResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Allows a filter (typically one which implements SaslAuthenticateRequestFilter)
+ to announce a successful authentication outcome with the Kafka client to other plugins.
An id which uniquely identifies the connection with the client in both time and space.
+ In other words this will have a different value even if a client re-establishes a
+ TCP connection from the same IP address and source port.
Creates a builder for request filter result objects. This object encapsulates
+ the request to forward and optionally orders for actions such as closing
+ the connection or dropping the request.
+
+ The builder returns either a CompletionStage<RequestFilterResult> object
+ ready to be returned by the request filter, or a RequestFilterResult object.
+ The latter facilitates asynchronous programming patterns where requests must be
+ forwarded after other work has completed.
Generates a completed filter result containing the given header and request. When
+ request filter implementations return this result, the request will be sent towards
+ the broker, invoking upstream filters.
+
+ Invoking this method is identical to invoking:
+ requestFilterResultBuilder.forward(header, request).complete()
Send a request from a filter towards the broker. The response to the request will be made available to the
+ filter asynchronously, by way of the CompletionStage. The CompletionStage will contain the response
+ object, or null if the request does not have a response.
+
Header
+
The caller is required to provide a RequestHeaderData. It is recommended that the
+ caller specify the RequestHeaderData.requestApiVersion(). This can be done conveniently
+ with forms such as:
+
new RequestHeaderData().setRequestApiVersion(4)
+
The caller may also provide a RequestHeaderData.clientId() and
+ RequestHeaderData.unknownTaggedFields().
+
Kroxylicious will automatically set the RequestHeaderData.requestApiKey() to be consistent
+ with the request. RequestHeaderData.correlationId() is ignored.
+
Filtering
+
The request will pass through all filters upstream of the filter that invoked the operation,
+ invoking them. Similarly, the response will pass through all filters upstream
+ of the filter that invoked the operation, invoking them, but not itself. The response does not
+ pass through filters downstream.
+
+
Chained Computation stages
+
Default and asynchronous default computation stages chained to the returned
+ CompletionStage are guaranteed to be executed by the thread associated with the
+ connection. See io.kroxylicious.proxy.filter for more details.
+
Attempts to map all the given topicIds to the current corresponding topic names.
+
+
Parameters:
+
topicIds - topic ids to map to names
+
Returns:
+
a CompletionStage that will be completed with a complete mapping, with every requested topic id mapped to either a
+ TopicNameMappingException or a name. All failure modes should complete the stage with a TopicNameMapping, with the
+ TopicNameMapping used to convey the reason for failure, rather than failing the Stage.
+
Chained Computation stages
+
Default and asynchronous default computation stages chained to the returned
+ CompletionStage are guaranteed to be executed by the thread
+ associated with the connection. See io.kroxylicious.proxy.filter for more details.
+
Generates a completed filter result containing the given header and response. When
+ response filter implementations return this result, the response will be sent towards
+ the client, invoking downstream filters.
+
+ Invoking this method is identical to invoking:
+ responseFilterResultBuilder.forward(header, response).complete()
Creates a builder for response filter result objects. This object encapsulates
+ the response to forward and optionally orders for actions such as closing
+ the connection or dropping the response.
+
+ The builder returns either a CompletionStage<ResponseFilterResult> object
+ ready to be returned by the response filter, or a ResponseFilterResult object.
+ The latter facilitates asynchronous programming patterns where responses must be
+ forwarded after other work has completed.
Allows a filter (typically one which implements SaslAuthenticateRequestFilter)
+ to announce a successful authentication outcome with the Kafka client to other plugins.
+ After calling this method the results of clientSaslContext()
+ and authenticatedSubject() will both be non-empty for this and other filters.
+
+ In order to support reauthentication, calls to this method and
+ clientSaslAuthenticationFailure(String, String, Exception)
+ may be arbitrarily interleaved during the lifetime of a given filter instance.
Allows a filter (typically one which implements SaslAuthenticateRequestFilter)
+ to announce a failed authentication outcome with the Kafka client.
+ After calling this method the result of clientSaslContext() will
+ be empty for this and other filters.
+ It is the filter's responsibility to return the right error response to a client, and/or disconnect.
+
+ In order to support reauthentication, calls to this method and
+ clientSaslAuthenticationSuccess(String, Subject)
+ may be arbitrarily interleaved during the lifetime of a given filter instance.
+
+
Parameters:
+
mechanism - The SASL mechanism used, or null if this is not known.
+
authorizedId - The authorizedId, or null if this is not known.
+
exception - An exception describing the authentication failure.
Depending on configuration, the subject can be based on network-level or Kafka protocol-level information (or both):
+
+
This will return an
+ anonymous Subject (one with an empty principals set) when
+ no authentication is configured, or the transport layer cannot provide authentication (e.g. TCP or non-mutual TLS transports).
+
When client mutual TLS authentication is configured this will
+ initially return a non-anonymous Subject based on the TLS certificate presented by the client.
Because of the possibility of reauthentication it is also possible for the
+ subject to change even after the initial SASL authentication.
+
+
+
Because the subject can change, callers are advised to be careful to avoid
+ caching subjects, or decisions derived from them.
+
+
Which principals are present in the returned subject, and what their names look like,
+ depends on the configuration of network
+ and/or clientSaslAuthenticationSuccess(String, Subject)-calling filters.
+ In general, filters should be configurable with respect to the principal type when interrogating the returned
+ subject.
An Executor backed by the Filter Dispatch Thread. That is the single thread associated with the Channel for
+ a Filter Instance. All invocations of that instance's methods are made by the Filter Dispatch Thread.
+
+ If all accesses/mutations of Filter Members happen on the Filter Dispatch Thread, then those interactions are
+ serial and threadsafe. This executor enables Authors to do work in uncontrolled threads and then return to
+ the Filter Dispatch Thread to take advantage of this convenient threading guarantee.
+
+
+ Note that implementations of FilterDispatchExecutor will not honour the shut-down methods ExecutorService.shutdown()
+ and ExecutorService.shutdownNow() and will throw a RuntimeException if those methods are called, as the client is
+ not allowed to shut down the Filter Dispatch Thread.
+
Switches to this Filter Dispatch Thread. The CompletionStage returned is an implementation
+ that discourages blocking work and will throw if the client attempts to call
+ CompletionStage.toCompletableFuture(). Any async work chained onto the
+ result CompletionStage using methods like CompletionStage.thenApplyAsync(Function)
+ will also be executed on the Filter Dispatch Thread.
+
+
Type Parameters:
+
T - stage value type
+
Parameters:
+
stage - input stage
+
Returns:
+
a stage completed (both exceptionally and normally) by this Filter Dispatch Thread
service implementations provided by filter authors
+
called by the proxy runtime to create filter instances
+
+
+
The proxy runtime guarantees that:
+
+
instances will be initialized before any attempt to create filter instances,
+
+ instances will eventually be closed (via close(Object)) if and only if they were successfully initialized,
+
no attempts to create filter instances will be made once a FilterFactory instance is closed,
+
instances will be initialized and closed on the same thread.
+
+
+ Filter instance creation can happen on a different thread than initialization or cleanup.
+ It is suggested to pass state via the return value from createFilter(FilterFactoryContext, Object) rather than
+ relying on synchronization within a filter factory implementation.
Initializes the factory with the specified configuration.
+
+
This method is guaranteed to be called at most once for each filter configuration and before any call to
+ createFilter(FilterFactoryContext, Object).
+ This method may provide extra semantic validation of the config,
+ and returns some object (which may be the config, or some other object) which will be passed to createFilter(FilterFactoryContext, Object).
+
+
Parameters:
+
context - context
+
config - configuration
+
Returns:
+
A configuration state object, specific to the given config, which will be passed to the other methods of this interface.
Construction context for Filters. Used to pass the filter configuration and environmental resources
+ to the FilterFactory when it is creating a new instance of the Filter. see FilterFactory.createFilter(FilterFactoryContext, Object)
An executor backed by the single Thread responsible for dispatching
+ work to a Filter instance for a channel.
+ It is safe to mutate Filter members from this executor.
The result of a filter request or response operation that encapsulates the request or response
+ to be forwarded to the next filter in the chain. Optionally it carries orders for actions such
+ as close the connection or drop the message.
A forward of a request or response message to the next filter in the chain.
+
+
Parameters:
+
header - message header. May not be null.
+
message - api message. May not be null. For request messages the class must have a name
+ that ends with RequestData. For response messages the class must have one
+ that ends with ResponseData.
Determine if a request message of type FindCoordinatorRequest should be handled by
+ this filter implementation.
+ returns true then onFindCoordinatorRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type FindCoordinatorResponse should be handled by
+ this filter implementation.
+ returns true then onFindCoordinatorResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type GetTelemetrySubscriptionsRequest should be handled by
+ this filter implementation.
+ returns true then onGetTelemetrySubscriptionsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type GetTelemetrySubscriptionsResponse should be handled by
+ this filter implementation.
+ returns true then onGetTelemetrySubscriptionsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type HeartbeatRequest should be handled by
+ this filter implementation.
+ returns true then onHeartbeatRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type HeartbeatResponse should be handled by
+ this filter implementation.
+ returns true then onHeartbeatResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type IncrementalAlterConfigsRequest should be handled by
+ this filter implementation.
+ returns true then onIncrementalAlterConfigsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type IncrementalAlterConfigsResponse should be handled by
+ this filter implementation.
+ returns true then onIncrementalAlterConfigsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type InitProducerIdRequest should be handled by
+ this filter implementation.
+ returns true then onInitProducerIdRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type InitProducerIdResponse should be handled by
+ this filter implementation.
+ returns true then onInitProducerIdResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type InitializeShareGroupStateRequest should be handled by
+ this filter implementation.
+ returns true then onInitializeShareGroupStateRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type InitializeShareGroupStateResponse should be handled by
+ this filter implementation.
+ returns true then onInitializeShareGroupStateResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type JoinGroupRequest should be handled by
+ this filter implementation.
+ returns true then onJoinGroupRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type JoinGroupResponse should be handled by
+ this filter implementation.
+ returns true then onJoinGroupResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type LeaveGroupRequest should be handled by
+ this filter implementation.
+ returns true then onLeaveGroupRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type LeaveGroupResponse should be handled by
+ this filter implementation.
+ returns true then onLeaveGroupResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ListConfigResourcesRequest should be handled by
+ this filter implementation.
+ returns true then onListConfigResourcesRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ListConfigResourcesResponse should be handled by
+ this filter implementation.
+ returns true then onListConfigResourcesResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ListGroupsRequest should be handled by
+ this filter implementation.
+ returns true then onListGroupsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ListGroupsResponse should be handled by
+ this filter implementation.
+ returns true then onListGroupsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ListOffsetsRequest should be handled by
+ this filter implementation.
+ returns true then onListOffsetsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ListOffsetsResponse should be handled by
+ this filter implementation.
+ returns true then onListOffsetsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ListPartitionReassignmentsRequest should be handled by
+ this filter implementation.
+ returns true then onListPartitionReassignmentsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ListPartitionReassignmentsResponse should be handled by
+ this filter implementation.
+ returns true then onListPartitionReassignmentsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ListTransactionsRequest should be handled by
+ this filter implementation.
+ returns true then onListTransactionsRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ListTransactionsResponse should be handled by
+ this filter implementation.
+ returns true then onListTransactionsResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type MetadataRequest should be handled by
+ this filter implementation.
+ returns true then onMetadataRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type MetadataResponse should be handled by
+ this filter implementation.
+ returns true then onMetadataResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type OffsetCommitRequest should be handled by
+ this filter implementation.
+ returns true then onOffsetCommitRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type OffsetCommitResponse should be handled by
+ this filter implementation.
+ returns true then onOffsetCommitResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type OffsetDeleteRequest should be handled by
+ this filter implementation.
+ returns true then onOffsetDeleteRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type OffsetDeleteResponse should be handled by
+ this filter implementation.
+ returns true then onOffsetDeleteResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type OffsetFetchRequest should be handled by
+ this filter implementation.
+ returns true then onOffsetFetchRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type OffsetFetchResponse should be handled by
+ this filter implementation.
+ returns true then onOffsetFetchResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type OffsetForLeaderEpochRequest should be handled by
+ this filter implementation.
+ returns true then onOffsetForLeaderEpochRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type OffsetForLeaderEpochResponse should be handled by
+ this filter implementation.
+ returns true then onOffsetForLeaderEpochResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ProduceRequest should be handled by
+ this filter implementation.
+ returns true then onProduceRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ProduceResponse should be handled by
+ this filter implementation.
+ returns true then onProduceResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type PushTelemetryRequest should be handled by
+ this filter implementation.
+ returns true then onPushTelemetryRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type PushTelemetryResponse should be handled by
+ this filter implementation.
+ returns true then onPushTelemetryResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ReadShareGroupStateRequest should be handled by
+ this filter implementation.
+ returns true then onReadShareGroupStateRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ReadShareGroupStateResponse should be handled by
+ this filter implementation.
+ returns true then onReadShareGroupStateResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ReadShareGroupStateSummaryRequest should be handled by
+ this filter implementation.
+ returns true then onReadShareGroupStateSummaryRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ReadShareGroupStateSummaryResponse should be handled by
+ this filter implementation.
+ returns true then onReadShareGroupStateSummaryResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type RemoveRaftVoterRequest should be handled by
+ this filter implementation.
+ returns true then onRemoveRaftVoterRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type RemoveRaftVoterResponse should be handled by
+ this filter implementation.
+ returns true then onRemoveRaftVoterResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type RenewDelegationTokenRequest should be handled by
+ this filter implementation.
+ returns true then onRenewDelegationTokenRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type RenewDelegationTokenResponse should be handled by
+ this filter implementation.
+ returns true then onRenewDelegationTokenResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiKey - key of the request
+
header - header of the request
+
request - body of the request
+
context - context containing methods to continue the filter chain and other contextual data
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate
+ new instances.
+
+
Parameters:
+
apiKey - key of the request
+
apiVersion - api version of the request
+
header - header of the request
+
request - body of the request
+
context - context containing methods to continue the filter chain and other contextual data
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
public interface RequestFilterResult
+extends FilterResult
+
A specialization of the FilterResult for request filters.
+
+ A request filter may, rather than forwarding a request towards the broker, opt to
+ send a response towards the client instead. This is called a short circuit response. It
+ is useful for implementing validating filters.
Generate a short-circuit error response towards the client.
+ The generated error response is API-specific: it adds an error code (corresponding to the ApiException), and possibly an error message (from the message of the ApiException), either at the top level of the response (if the API for the response has a global error code), or for all entities given in the request (if the API for the response has only per-entity error codes).
+
+
Parameters:
+
header - the headers from the request
+
requestMessage - the API request message to generate an error response to.
+
apiException - the exception that triggered the error response. Note that Kafka will map the exception to an error code, so callers may wish to choose their exception to trigger the appropriate error code.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiKey - key of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiKey - key of the response
+
apiVersion - api version of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type SaslAuthenticateRequest should be handled by
+ this filter implementation.
+ If this method returns true then onSaslAuthenticateRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type SaslAuthenticateResponse should be handled by
+ this filter implementation.
+ If this method returns true then onSaslAuthenticateResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type SaslHandshakeRequest should be handled by
+ this filter implementation.
+ returns true then onSaslHandshakeRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type SaslHandshakeResponse should be handled by
+ this filter implementation.
+ returns true then onSaslHandshakeResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ShareAcknowledgeRequest should be handled by
+ this filter implementation.
+ returns true then onShareAcknowledgeRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ShareAcknowledgeResponse should be handled by
+ this filter implementation.
+ returns true then onShareAcknowledgeResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ShareFetchRequest should be handled by
+ this filter implementation.
+ returns true then onShareFetchRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ShareFetchResponse should be handled by
+ this filter implementation.
+ returns true then onShareFetchResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ShareGroupDescribeRequest should be handled by
+ this filter implementation.
+ returns true then onShareGroupDescribeRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ShareGroupDescribeResponse should be handled by
+ this filter implementation.
+ returns true then onShareGroupDescribeResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type ShareGroupHeartbeatRequest should be handled by
+ this filter implementation.
+ returns true then onShareGroupHeartbeatRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type ShareGroupHeartbeatResponse should be handled by
+ this filter implementation.
+ returns true then onShareGroupHeartbeatResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type StreamsGroupDescribeRequest should be handled by
+ this filter implementation.
+ returns true then onStreamsGroupDescribeRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type StreamsGroupDescribeResponse should be handled by
+ this filter implementation.
+ returns true then onStreamsGroupDescribeResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type StreamsGroupHeartbeatRequest should be handled by
+ this filter implementation.
+ returns true then onStreamsGroupHeartbeatRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type StreamsGroupHeartbeatResponse should be handled by
+ this filter implementation.
+ returns true then onStreamsGroupHeartbeatResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type SyncGroupRequest should be handled by
+ this filter implementation.
+ returns true then onSyncGroupRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type SyncGroupResponse should be handled by
+ this filter implementation.
+ returns true then onSyncGroupResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type TxnOffsetCommitRequest should be handled by
+ this filter implementation.
+ returns true then onTxnOffsetCommitRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type TxnOffsetCommitResponse should be handled by
+ this filter implementation.
+ returns true then onTxnOffsetCommitResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type UnregisterBrokerRequest should be handled by
+ this filter implementation.
+ returns true then onUnregisterBrokerRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type UnregisterBrokerResponse should be handled by
+ this filter implementation.
+ returns true then onUnregisterBrokerResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type UpdateFeaturesRequest should be handled by
+ this filter implementation.
+ returns true then onUpdateFeaturesRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type UpdateFeaturesResponse should be handled by
+ this filter implementation.
+ returns true then onUpdateFeaturesResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type UpdateRaftVoterRequest should be handled by
+ this filter implementation.
+ returns true then onUpdateRaftVoterRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type UpdateRaftVoterResponse should be handled by
+ this filter implementation.
+ returns true then onUpdateRaftVoterResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type VoteRequest should be handled by
+ this filter implementation.
+ returns true then onVoteRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type VoteResponse should be handled by
+ this filter implementation.
+ returns true then onVoteResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type WriteShareGroupStateRequest should be handled by
+ this filter implementation.
+ returns true then onWriteShareGroupStateRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type WriteShareGroupStateResponse should be handled by
+ this filter implementation.
+ returns true then onWriteShareGroupStateResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Determine if a request message of type WriteTxnMarkersRequest should be handled by
+ this filter implementation.
+ returns true then onWriteTxnMarkersRequest is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and request pair, returning the header and request
+ pair to be passed to the next filter using the RequestFilterResult.
+
+ The implementation may modify the given header and request in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the request
+
header - request header.
+
request - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a RequestFilterResult containing the
+ request to be forwarded.
Determine if a response message of type WriteTxnMarkersResponse should be handled by
+ this filter implementation.
+ returns true then onWriteTxnMarkersResponse is eligible to be invoked with
+ deserialized data, if the message reaches this filter in the chain.
Handle the given header and response pair, returning the header and response
+ pair to be passed to the next filter using the ResponseFilterResult.
+
+ The implementation may modify the given header and response in-place, or instantiate a
+ new instances.
+
+
Parameters:
+
apiVersion - the apiVersion of the response
+
header - response header.
+
response - The body to handle.
+
context - The context.
+
Returns:
+
a non-null CompletionStage that, when complete, will yield a ResponseFilterResult containing the
+ response to be forwarded.
Filters use these builders (obtained from FilterContext)
+ to construct FilterResult instances that control
+ message forwarding, dropping, short-circuiting, and connection management.
+ All the exception types offer TopicNameMappingException.getError() for conveniently determining the cause. Unhandled
+ exceptions will be mapped to Errors.UNKNOWN_SERVER_ERROR. Callers will be able to use this to detect expected
+ cases like Errors.UNKNOWN_TOPIC_ID.
APIs for resolving topic IDs to topic names and handling metadata-related errors.
+
Filters that need to translate topic IDs (e.g., from Fetch requests) to topic names
+ can use TopicNameMapping to represent
+ the results of such resolution, including handling of partial failures.
An interface is provided for each kind of request and response in the Kafka protocol, e.g. ProduceRequestFilter.
+ Protocol Filter implementations inherit whichever of the per-RPC interfaces they need to intercept.
+ They can inherit multiple interfaces if necessary.
+ For filters which need to intercept most or all of the protocol it is more convenient to inherit
+ RequestFilter and/or ResponseFilter.
+
+
Important facts about the Kafka protocol
+
+
Pipelining
+
The Kafka protocol supports pipelining (meaning a client can send multiple requests,
+ before getting a response for any of them). Therefore when writing a filter implementation
+ do not assume you won't see multiple requests before seeing any corresponding responses.
+
+
Ordering
+
A broker does not, in general, send responses in the same order as it receives requests.
+ Therefore when writing a filter implementation do not assume ordering.
+
+
Local view
+
A client may obtain information from one broker in a cluster and use it to interact with other
+ brokers in the cluster (or the same broker, but on a different connection, and therefore a different
+ channel and filter chain). A classic example would
+ be a producer or consumer making a metadata connection and Metadata request to a broker and
+ then connecting to a partition leader to produce/consume records (Produce and Fetch requests).
+
+
So although your filter
+ implementation might intercept both Metadata and Produce request/response
+ (for example), those requests will not pass through the same instance of your filter
+ implementation. Therefore it is incorrect, in general, to assume your filter has a global view of
+ the communication between the client and broker.
+
+
Implementing Filters
+
+
Filter Results
+
Filter implementations must return a CompletionStage containing a
+ FilterResult object. It is the job of FilterResult to convey what
+ message is to be forwarded to the next filter in the chain (or client/broker if at the chain's beginning
+ or end). It is also used to carry instructions such as indicating that the connection must be closed,
+ or a message dropped.
+
If the filter returns a CompletionStage that is already completed normally, Kroxylicious will immediately
+ perform the action described by the FilterResult.
+
If the CompletionStage completes exceptionally, the connection is closed. This also applies if the
+ CompletionStage does not complete within a timeout (20000 milliseconds).
+
Deferring Forwards
+
The filter may return a CompletionStage that is not yet completed. When this happens, Kroxylicious will pause
+ reading from the downstream (the Client writes will eventually block), and it begins to queue up in-flight
+ requests/responses arriving at the filter. This is done so that message order is maintained. Once the
+ CompletionStage completes, the action described by the FilterResult is performed, reading from the downstream
+ resumes and any queued up requests/responses are processed.
+
IMPORTANT: The pausing of reads from the downstream is a relatively costly operation. To maintain optimal performance
+ filter implementations should minimise the occasions on which an incomplete CompletionStage is returned.
+
Creating Filter Result objects
+
The FilterContext is the factory for the FilterResult objects.
+
There are two convenience methods that simply allow a filter to immediately forward a result:
+ The Filter API provides the following thread-safety guarantees:
+
+
+
There is a single thread associated with each connection and this association lasts for the lifetime of the connection.
+
Each filter instance is associated with exactly one connection.
+
Construction of the filter instance and dispatch of the filter methods onXxxRequest and
+ onXxxResponse takes place on that same thread.
+
Any computation stages chained to the CompletionStage returned by
+ FilterContext.sendRequest(org.apache.kafka.common.message.RequestHeaderData, org.apache.kafka.common.protocol.ApiMessage)
+ using the default execution methods (using methods without the suffix async) or default asynchronous execution
+ (using methods with suffix async that employ the stage's default asynchronous execution facility)
+ are guaranteed to be performed by that same thread. Computation stages chained using custom asynchronous
+ execution (using methods with suffix async that take an Executor argument) do not get this guarantee.
+
+
Filter implementations are free to rely on these guarantees to safely maintain state within fields
+ of the Filter without employing additional synchronization.
Annotates a @Plugin
+ implementation class whose
+ fully-qualified type name has been changed
+ and whose old name should no longer be used
+ to refer to it.
+
+
Plugin implementations should ideally have a single,
+ canonical name (the fully-qualified class name), so
+ this annotation is not intended to provide a general purpose
+ plugin aliasing facility.
+ Instead, it is provided as a way of "renaming a plugin"
+ while maintaining backwards compatibility with
+ configuration files that continue to use the old
+ implementation class name.
+ When a plugin implementation is instantiated using the old name
+ a warning will be logged prompting the
+ end user to update their configuration to use
+ the new name.
+
+
If a plugin implementation class itself is deprecated then
+ the @Deprecated
+ annotation should be used on that class instead.
An annotation, on a plugin implementation class, that identifies the class of "config record"
+ consumed by that implementation.
+ Use @Plugin(configType=Void.class) if a plugin implementation class doesn't require configuration.
An annotation that identifies a plugin instance name at a plugin point within the configuration.
+ This should be applied to the property of the class representing the plugin point, and should name the
+ corresponding @PluginImplName-annotated sibling property.
API for defining plugins within configuration.
+
+
Terminology
+
A plugin interface is a Java interface (or possibly a class) that can be used
+ in the implementation of a component of the proxy.
+ FilterFactory is one example of a plugin interface.
+
+
A plugin implementation provides a concrete behaviour by implementing the plugin interface
+ (or extending the class).
+ There will usually be more than one plugin implementation for a given plugin interface.
+ Plugin implementations can be referenced in a configuration file by their
+ fully qualified class name, or by their unqualified class name if that is unambiguous.
+ Such references are known as plugin implementation names.
+ Any class that implements FilterFactory is an example of a
+ plugin implementation.
+
+
Plugin implementations often require some configuration to be provided in the configuration file.
+ The configuration is represented in Java using a "config record", or "config class".
+ Different plugin implementations will generally use different config records.
+
+
Using a plugin implementation
+
+
Consider the author of a FooFilter who wants to use a HttpGetter to make an HTTP GET request,
+ but doesn't want to depend directly on any particular HTTP client. HttpGetter is the plugin interface.
Note that this plugin interface doesn't know the concrete type of the configuration that an implementation requires.
+ In this case it is using a type parameter C to represent that.
+
+
The config record for FooFilter would express its dependence on a HttpGetter, and also
+ provide a property to hold the HttpGetter implementation's own configuration, like this
The PluginImplConfig.implNameProperty() names the property of the config object that holds the
+ plugin implementation name. In practice the author of FooFilter might want to choose config property names which are intuitive to someone
+ writing a configuration file, such as httpImpl and httpConfig.
+
+
The FooFilter author can then get an instance of the plugin implementation configured by the user using the
+ FilterFactoryContext, like this:
+
+ class FooFilterFactory implements FilterFactory<FooFilterConfig, Void> {
+ Void initialize(FilterFactoryContext context, FooFilterConfig config) {
+ // get the configured HttpGetter
+ this.httpGetter = context.pluginInstance(HttpGetter.class, config.httpGetterPluginImplName());
+ // initialize it
+ this.httpGetter.configure(config.httpGetterConfig());
+ return null;
+ }
+ Filter createFilter(FilterFactoryContext context, Void v) {
+ return new Filter() {
+ // use the httpGetter
+ };
+ }
+ }
+
+
+
Implementing a plugin
+
+
Someone can write an implementation of HttpGetter using Netty. They need to annotate their
+ implementation with @Plugin to indicate
+ the type of configuration it uses.
Annotates a @Plugin
+ implementation class whose
+ fully-qualified type name has been changed
+ and whose old name should no longer be used
+ to refer to it.
Exposes TLS information about the client-to-proxy connection to plugins, for example using FilterContext.clientTlsContext().
+ This is implemented by the runtime for use by plugins.
Supplies TLS credentials for proxy-to-server (upstream) TLS connections.
+
+
Instances of this interface are created by ServerTlsCredentialSupplierFactory
+ and are responsible for providing TLS credentials (private keys and certificate chains)
+ that the proxy uses when connecting to the target Kafka cluster.
+
+
The supplier supports asynchronous credential retrieval, allowing implementations
+ to load credentials from remote sources, perform cryptographic operations, or
+ interact with external services without blocking the proxy runtime.
Implementations must not block the calling thread or perform heavy I/O operations synchronously.
+ Long-running work such as network calls, file I/O, or key generation should be performed
+ asynchronously, returning a CompletionStage that completes when the work is done.
+
+
Error Handling
+
If credential retrieval fails, implementations should return a CompletionStage
+ that completes exceptionally. The runtime will handle the exception appropriately,
+ typically by rejecting the connection attempt.
+
+
Usage Example: File-Based Credential Loading
+
+ public class FileBasedCredentialSupplier implements ServerTlsCredentialSupplier {
+ private final PrivateKey key;
+ private final X509Certificate[] chain;
+
+ public FileBasedCredentialSupplier(PrivateKey key, X509Certificate[] chain) {
+ this.key = key;
+ this.chain = chain;
+ }
+
+ @Override
+ public CompletionStage<TlsCredentials> tlsCredentials(ServerTlsCredentialSupplierContext context) {
+ // Plugin has already parsed the key and certificate chain (from PEM, PKCS12, etc.)
+ // Use context factory method to create validated TlsCredentials
+ TlsCredentials creds = context.tlsCredentials(key, chain);
+ return CompletableFuture.completedFuture(creds);
+ }
+ }
+
Asynchronously retrieves TLS credentials for the proxy to use when connecting
+ to the target Kafka cluster.
+
+
This method may be called multiple times and should return credentials
+ appropriate for the current request context. Implementations may cache
+ credentials, retrieve them from external sources, or generate them on-demand.
+
+
Parameters:
+
context - The runtime context for this credential request
Returns TLS information about the client-to-proxy connection, if available.
+
+
This provides access to the client's TLS certificate (if client authentication
+ was performed) and the proxy's server certificate that was presented to the client.
+ This information can be used to make credential selection decisions based on
+ client identity or other TLS handshake data.
+
+
Returns:
+
Optional containing the client TLS context, or empty if TLS is not in use
+ or if the handshake has not yet completed
Creates a TlsCredentials instance from the given private key and certificate chain.
+
+
This factory method validates the provided credentials before creating the
+ TlsCredentials instance. The validation ensures that:
+
+
The certificate chain is structurally valid
+
The private key matches the leaf certificate's public key
+
+
+
The plugin is responsible for loading and parsing the credentials from whatever
+ source and format it uses (PEM files, PKCS12 keystores, HSMs, etc.).
+
+
Parameters:
+
key - The private key corresponding to the leaf certificate.
+
certificateChain - The certificate chain, starting with the leaf certificate
+ and including any intermediate certificates up to (but not including) the root CA.
service implementations provided by plugin authors
+
called by the proxy runtime to create
+ credential supplier instances
+
used to configure how the proxy obtains TLS credentials for server-side connections
+
+
+
The proxy runtime guarantees that:
+
+
instances will be initialized
+ before any attempt to create
+ credential supplier instances,
+
instances will eventually be closed if and only if they were
+ successfully initialized,
+
no attempts to create credential supplier instances will be made once a factory instance is closed,
+
instances will be initialized and closed on the same thread.
+
+
+
Credential supplier creation can happen on a different thread than initialization or cleanup.
+ It is suggested to pass state using the return value from initialize(ServerTlsCredentialSupplierFactoryContext, Object)
+ rather than relying on synchronization within a factory implementation.
Called once after initialization to create a shared supplier instance. The returned supplier
+ must be thread-safe as it will be shared across all connections to this virtual cluster.
This context is provided to ServerTlsCredentialSupplierFactory methods
+ during initialization and supplier creation. It provides access to the plugin
+ infrastructure, allowing factories to discover and instantiate nested plugins.
+
+
Plugin Composition
+
The context supports plugin composition through pluginInstance(Class, String),
+ enabling credential supplier factories to depend on other plugins such as key
+ management services, certificate authorities, or secret stores.
+
+
Usage Example
+
+ @Plugin(configType = MySupplierConfig.class)
+ public class MySupplierFactory implements ServerTlsCredentialSupplierFactory<MySupplierConfig, Context> {
+
+ @Override
+ public Context initialize(ServerTlsCredentialSupplierFactoryContext context, MySupplierConfig config) {
+ // Get a nested plugin instance for key management
+ KeyManagementService kms = context.pluginInstance(KeyManagementService.class, config.kmsName());
+ return new Context(config, kms);
+ }
+
+ @Override
+ public ServerTlsCredentialSupplier create(ServerTlsCredentialSupplierFactoryContext context, Context initData) {
+ return new MyCredentialSupplier(initData);
+ }
+ }
+
Represents TLS credentials (private key and certificate chain) that can be used
+ for server-side TLS connections.
+
+
This interface is intentionally empty as it serves as a marker interface.
+ The runtime implementation will contain the actual credential data and provide
+ necessary integration with the underlying TLS/SSL infrastructure.
+
+
Plugin developers should not implement this interface directly. Instead,
+ implementations are provided by the Kroxylicious runtime when TLS credentials
+ are successfully loaded from the configured source.
+
+
Usage Example
+
+ // Created via the context factory method
+ TlsCredentials credentials = context.tlsCredentials(privateKey, certificateChain);
+
+ // Returned to the runtime via CompletionStage
+ return CompletableFuture.completedFuture(credentials);
+
Represents TLS credentials (private key and certificate chain) that can be used
+ for server-side TLS connections.
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/documentation/0.21.0/javadoc/legal/ADDITIONAL_LICENSE_INFO b/documentation/0.21.0/javadoc/legal/ADDITIONAL_LICENSE_INFO
new file mode 100644
index 00000000..ff700cd0
--- /dev/null
+++ b/documentation/0.21.0/javadoc/legal/ADDITIONAL_LICENSE_INFO
@@ -0,0 +1,37 @@
+ ADDITIONAL INFORMATION ABOUT LICENSING
+
+Certain files distributed by Oracle America, Inc. and/or its affiliates are
+subject to the following clarification and special exception to the GPLv2,
+based on the GNU Project exception for its Classpath libraries, known as the
+GNU Classpath Exception.
+
+Note that Oracle includes multiple, independent programs in this software
+package. Some of those programs are provided under licenses deemed
+incompatible with the GPLv2 by the Free Software Foundation and others.
+For example, the package includes programs licensed under the Apache
+License, Version 2.0 and may include FreeType. Such programs are licensed
+to you under their original licenses.
+
+Oracle facilitates your further distribution of this package by adding the
+Classpath Exception to the necessary parts of its GPLv2 code, which permits
+you to use that code in combination with other independent modules not
+licensed under the GPLv2. However, note that this would not permit you to
+commingle code under an incompatible license with Oracle's GPLv2 licensed
+code by, for example, cutting and pasting such code into a file also
+containing Oracle's GPLv2 licensed code and then distributing the result.
+
+Additionally, if you were to remove the Classpath Exception from any of the
+files to which it applies and distribute the result, you would likely be
+required to license some or all of the other code in that distribution under
+the GPLv2 as well, and since the GPLv2 is incompatible with the license terms
+of some items included in the distribution by Oracle, removing the Classpath
+Exception could therefore effectively compromise your ability to further
+distribute the package.
+
+Failing to distribute notices associated with some files may also create
+unexpected legal consequences.
+
+Proceed with caution and we recommend that you obtain the advice of a lawyer
+skilled in open source matters before removing the Classpath Exception or
+making modifications to this package which may subsequently be redistributed
+and/or involve the use of third party software.
diff --git a/documentation/0.21.0/javadoc/legal/ASSEMBLY_EXCEPTION b/documentation/0.21.0/javadoc/legal/ASSEMBLY_EXCEPTION
new file mode 100644
index 00000000..42966666
--- /dev/null
+++ b/documentation/0.21.0/javadoc/legal/ASSEMBLY_EXCEPTION
@@ -0,0 +1,27 @@
+
+OPENJDK ASSEMBLY EXCEPTION
+
+The OpenJDK source code made available by Oracle America, Inc. (Oracle) at
+openjdk.org ("OpenJDK Code") is distributed under the terms of the GNU
+General Public License version 2
+only ("GPL2"), with the following clarification and special exception.
+
+ Linking this OpenJDK Code statically or dynamically with other code
+ is making a combined work based on this library. Thus, the terms
+ and conditions of GPL2 cover the whole combination.
+
+ As a special exception, Oracle gives you permission to link this
+ OpenJDK Code with certain code licensed by Oracle as indicated at
+ https://openjdk.org/legal/exception-modules-2007-05-08.html
+ ("Designated Exception Modules") to produce an executable,
+ regardless of the license terms of the Designated Exception Modules,
+ and to copy and distribute the resulting executable under GPL2,
+ provided that the Designated Exception Modules continue to be
+ governed by the licenses under which they were offered by Oracle.
+
+As such, it allows licensees and sublicensees of Oracle's GPL2 OpenJDK Code
+to build an executable that includes those portions of necessary code that
+Oracle could not provide under GPL2 (or that Oracle has provided under GPL2
+with the Classpath exception). If you modify or add to the OpenJDK code,
+that new GPL2 code may still be combined with Designated Exception Modules
+if the new code is made subject to this exception by its copyright holder.
diff --git a/documentation/0.21.0/javadoc/legal/LICENSE b/documentation/0.21.0/javadoc/legal/LICENSE
new file mode 100644
index 00000000..8b400c7a
--- /dev/null
+++ b/documentation/0.21.0/javadoc/legal/LICENSE
@@ -0,0 +1,347 @@
+The GNU General Public License (GPL)
+
+Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this license
+document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to share
+and change it. By contrast, the GNU General Public License is intended to
+guarantee your freedom to share and change free software--to make sure the
+software is free for all its users. This General Public License applies to
+most of the Free Software Foundation's software and to any other program whose
+authors commit to using it. (Some other Free Software Foundation software is
+covered by the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom to
+distribute copies of free software (and charge for this service if you wish),
+that you receive source code or can get it if you want it, that you can change
+the software or use pieces of it in new free programs; and that you know you
+can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to deny
+you these rights or to ask you to surrender the rights. These restrictions
+translate to certain responsibilities for you if you distribute copies of the
+software, or if you modify it.
+
+For example, if you distribute copies of such a program, whether gratis or for
+a fee, you must give the recipients all the rights that you have. You must
+make sure that they, too, receive or can get the source code. And you must
+show them these terms so they know their rights.
+
+We protect your rights with two steps: (1) copyright the software, and (2)
+offer you this license which gives you legal permission to copy, distribute
+and/or modify the software.
+
+Also, for each author's protection and ours, we want to make certain that
+everyone understands that there is no warranty for this free software. If the
+software is modified by someone else and passed on, we want its recipients to
+know that what they have is not the original, so that any problems introduced
+by others will not reflect on the original authors' reputations.
+
+Finally, any free program is threatened constantly by software patents. We
+wish to avoid the danger that redistributors of a free program will
+individually obtain patent licenses, in effect making the program proprietary.
+To prevent this, we have made it clear that any patent must be licensed for
+everyone's free use or not licensed at all.
+
+The precise terms and conditions for copying, distribution and modification
+follow.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License applies to any program or other work which contains a notice
+placed by the copyright holder saying it may be distributed under the terms of
+this General Public License. The "Program", below, refers to any such program
+or work, and a "work based on the Program" means either the Program or any
+derivative work under copyright law: that is to say, a work containing the
+Program or a portion of it, either verbatim or with modifications and/or
+translated into another language. (Hereinafter, translation is included
+without limitation in the term "modification".) Each licensee is addressed as
+"you".
+
+Activities other than copying, distribution and modification are not covered by
+this License; they are outside its scope. The act of running the Program is
+not restricted, and the output from the Program is covered only if its contents
+constitute a work based on the Program (independent of having been made by
+running the Program). Whether that is true depends on what the Program does.
+
+1. You may copy and distribute verbatim copies of the Program's source code as
+you receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice and
+disclaimer of warranty; keep intact all the notices that refer to this License
+and to the absence of any warranty; and give any other recipients of the
+Program a copy of this License along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and you may
+at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Program or any portion of it, thus
+forming a work based on the Program, and copy and distribute such modifications
+or work under the terms of Section 1 above, provided that you also meet all of
+these conditions:
+
+ a) You must cause the modified files to carry prominent notices stating
+ that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in whole or
+ in part contains or is derived from the Program or any part thereof, to be
+ licensed as a whole at no charge to all third parties under the terms of
+ this License.
+
+ c) If the modified program normally reads commands interactively when run,
+ you must cause it, when started running for such interactive use in the
+ most ordinary way, to print or display an announcement including an
+ appropriate copyright notice and a notice that there is no warranty (or
+ else, saying that you provide a warranty) and that users may redistribute
+ the program under these conditions, and telling the user how to view a copy
+ of this License. (Exception: if the Program itself is interactive but does
+ not normally print such an announcement, your work based on the Program is
+ not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If identifiable
+sections of that work are not derived from the Program, and can be reasonably
+considered independent and separate works in themselves, then this License, and
+its terms, do not apply to those sections when you distribute them as separate
+works. But when you distribute the same sections as part of a whole which is a
+work based on the Program, the distribution of the whole must be on the terms
+of this License, whose permissions for other licensees extend to the entire
+whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest your
+rights to work written entirely by you; rather, the intent is to exercise the
+right to control the distribution of derivative or collective works based on
+the Program.
+
+In addition, mere aggregation of another work not based on the Program with the
+Program (or with a work based on the Program) on a volume of a storage or
+distribution medium does not bring the other work under the scope of this
+License.
+
+3. You may copy and distribute the Program (or a work based on it, under
+Section 2) in object code or executable form under the terms of Sections 1 and
+2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable source
+ code, which must be distributed under the terms of Sections 1 and 2 above
+ on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three years, to
+ give any third party, for a charge no more than your cost of physically
+ performing source distribution, a complete machine-readable copy of the
+ corresponding source code, to be distributed under the terms of Sections 1
+ and 2 above on a medium customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer to
+ distribute corresponding source code. (This alternative is allowed only
+ for noncommercial distribution and only if you received the program in
+ object code or executable form with such an offer, in accord with
+ Subsection b above.)
+
+The source code for a work means the preferred form of the work for making
+modifications to it. For an executable work, complete source code means all
+the source code for all modules it contains, plus any associated interface
+definition files, plus the scripts used to control compilation and installation
+of the executable. However, as a special exception, the source code
+distributed need not include anything that is normally distributed (in either
+source or binary form) with the major components (compiler, kernel, and so on)
+of the operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the source
+code from the same place counts as distribution of the source code, even though
+third parties are not compelled to copy the source along with the object code.
+
+4. You may not copy, modify, sublicense, or distribute the Program except as
+expressly provided under this License. Any attempt otherwise to copy, modify,
+sublicense or distribute the Program is void, and will automatically terminate
+your rights under this License. However, parties who have received copies, or
+rights, from you under this License will not have their licenses terminated so
+long as such parties remain in full compliance.
+
+5. You are not required to accept this License, since you have not signed it.
+However, nothing else grants you permission to modify or distribute the Program
+or its derivative works. These actions are prohibited by law if you do not
+accept this License. Therefore, by modifying or distributing the Program (or
+any work based on the Program), you indicate your acceptance of this License to
+do so, and all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+6. Each time you redistribute the Program (or any work based on the Program),
+the recipient automatically receives a license from the original licensor to
+copy, distribute or modify the Program subject to these terms and conditions.
+You may not impose any further restrictions on the recipients' exercise of the
+rights granted herein. You are not responsible for enforcing compliance by
+third parties to this License.
+
+7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues), conditions
+are imposed on you (whether by court order, agreement or otherwise) that
+contradict the conditions of this License, they do not excuse you from the
+conditions of this License. If you cannot distribute so as to satisfy
+simultaneously your obligations under this License and any other pertinent
+obligations, then as a consequence you may not distribute the Program at all.
+For example, if a patent license would not permit royalty-free redistribution
+of the Program by all those who receive copies directly or indirectly through
+you, then the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply and
+the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any patents or
+other property right claims or to contest validity of any such claims; this
+section has the sole purpose of protecting the integrity of the free software
+distribution system, which is implemented by public license practices. Many
+people have made generous contributions to the wide range of software
+distributed through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing to
+distribute software through any other system and a licensee cannot impose that
+choice.
+
+This section is intended to make thoroughly clear what is believed to be a
+consequence of the rest of this License.
+
+8. If the distribution and/or use of the Program is restricted in certain
+countries either by patents or by copyrighted interfaces, the original
+copyright holder who places the Program under this License may add an explicit
+geographical distribution limitation excluding those countries, so that
+distribution is permitted only in or among countries not thus excluded. In
+such case, this License incorporates the limitation as if written in the body
+of this License.
+
+9. The Free Software Foundation may publish revised and/or new versions of the
+General Public License from time to time. Such new versions will be similar in
+spirit to the present version, but may differ in detail to address new problems
+or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any later
+version", you have the option of following the terms and conditions either of
+that version or of any later version published by the Free Software Foundation.
+If the Program does not specify a version number of this License, you may
+choose any version ever published by the Free Software Foundation.
+
+10. If you wish to incorporate parts of the Program into other free programs
+whose distribution conditions are different, write to the author to ask for
+permission. For software which is copyrighted by the Free Software Foundation,
+write to the Free Software Foundation; we sometimes make exceptions for this.
+Our decision will be guided by the two goals of preserving the free status of
+all derivatives of our free software and of promoting the sharing and reuse of
+software generally.
+
+NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
+THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE
+STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE
+PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
+PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE,
+YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL
+ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE
+PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR
+INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA
+BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER
+OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest possible
+use to the public, the best way to achieve this is to make it free software
+which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to attach
+them to the start of each source file to most effectively convey the exclusion
+of warranty; and each file should have at least the "copyright" line and a
+pointer to where the full notice is found.
+
+ One line to give the program's name and a brief idea of what it does.
+
+ Copyright (C)
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this when it
+starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
+ with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free
+ software, and you are welcome to redistribute it under certain conditions;
+ type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may be
+called something other than 'show w' and 'show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your school,
+if any, to sign a "copyright disclaimer" for the program, if necessary. Here
+is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ 'Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ signature of Ty Coon, 1 April 1989
+
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General Public
+License instead of this License.
+
+
+"CLASSPATH" EXCEPTION TO THE GPL
+
+Certain source files distributed by Oracle America and/or its affiliates are
+subject to the following clarification and special exception to the GPL, but
+only where Oracle has expressly included in the particular source file's header
+the words "Oracle designates this particular file as subject to the "Classpath"
+exception as provided by Oracle in the LICENSE file that accompanied this code."
+
+ Linking this library statically or dynamically with other modules is making
+ a combined work based on this library. Thus, the terms and conditions of
+ the GNU General Public License cover the whole combination.
+
+ As a special exception, the copyright holders of this library give you
+ permission to link this library with independent modules to produce an
+ executable, regardless of the license terms of these independent modules,
+ and to copy and distribute the resulting executable under terms of your
+ choice, provided that you also meet, for each linked independent module,
+ the terms and conditions of the license of that module. An independent
+ module is a module which is not derived from or based on this library. If
+ you modify this library, you may extend this exception to your version of
+ the library, but you are not obligated to do so. If you do not wish to do
+ so, delete this exception statement from your version.
diff --git a/documentation/0.21.0/javadoc/legal/jquery.md b/documentation/0.21.0/javadoc/legal/jquery.md
new file mode 100644
index 00000000..a763ec6f
--- /dev/null
+++ b/documentation/0.21.0/javadoc/legal/jquery.md
@@ -0,0 +1,26 @@
+## jQuery v3.7.1
+
+### jQuery License
+```
+jQuery v 3.7.1
+Copyright OpenJS Foundation and other contributors, https://openjsf.org/
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+```
diff --git a/documentation/0.21.0/javadoc/legal/jqueryUI.md b/documentation/0.21.0/javadoc/legal/jqueryUI.md
new file mode 100644
index 00000000..46bfbaa5
--- /dev/null
+++ b/documentation/0.21.0/javadoc/legal/jqueryUI.md
@@ -0,0 +1,49 @@
+## jQuery UI v1.14.1
+
+### jQuery UI License
+```
+Copyright OpenJS Foundation and other contributors, https://openjsf.org/
+
+This software consists of voluntary contributions made by many
+individuals. For exact contribution history, see the revision history
+available at https://github.com/jquery/jquery-ui
+
+The following license applies to all parts of this software except as
+documented below:
+
+====
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+====
+
+Copyright and related rights for sample code are waived via CC0. Sample
+code is defined as all source code contained within the demos directory.
+
+CC0: http://creativecommons.org/publicdomain/zero/1.0/
+
+====
+
+All files located in the node_modules and external directories are
+externally maintained libraries used by this software which have their
+own licenses; we recommend you read them, as their terms may differ from
+the terms above.
+
+```
diff --git a/documentation/0.21.0/javadoc/link.svg b/documentation/0.21.0/javadoc/link.svg
new file mode 100644
index 00000000..7ccc5ed0
--- /dev/null
+++ b/documentation/0.21.0/javadoc/link.svg
@@ -0,0 +1,31 @@
+
+
+
+
+
diff --git a/documentation/0.21.0/javadoc/member-search-index.js b/documentation/0.21.0/javadoc/member-search-index.js
new file mode 100644
index 00000000..a4d835ac
--- /dev/null
+++ b/documentation/0.21.0/javadoc/member-search-index.js
@@ -0,0 +1 @@
+memberSearchIndex = [{"p":"io.kroxylicious.proxy.config.tls","c":"KeyPair","l":"accept(KeyProviderVisitor)","u":"accept(io.kroxylicious.proxy.config.tls.KeyProviderVisitor)"},{"p":"io.kroxylicious.proxy.config.tls","c":"KeyProvider","l":"accept(KeyProviderVisitor)","u":"accept(io.kroxylicious.proxy.config.tls.KeyProviderVisitor)"},{"p":"io.kroxylicious.proxy.config.tls","c":"KeyStore","l":"accept(KeyProviderVisitor)","u":"accept(io.kroxylicious.proxy.config.tls.KeyProviderVisitor)"},{"p":"io.kroxylicious.proxy.config.tls","c":"InsecureTls","l":"accept(TrustProviderVisitor)","u":"accept(io.kroxylicious.proxy.config.tls.TrustProviderVisitor)"},{"p":"io.kroxylicious.proxy.config.tls","c":"PlatformTrustProvider","l":"accept(TrustProviderVisitor)","u":"accept(io.kroxylicious.proxy.config.tls.TrustProviderVisitor)"},{"p":"io.kroxylicious.proxy.config.tls","c":"TrustProvider","l":"accept(TrustProviderVisitor)","u":"accept(io.kroxylicious.proxy.config.tls.TrustProviderVisitor)"},{"p":"io.kroxylicious.proxy.config.tls","c":"TrustStore","l":"accept(TrustProviderVisitor)","u":"accept(io.kroxylicious.proxy.config.tls.TrustProviderVisitor)"},{"p":"io.kroxylicious.authorizer.service","c":"Action","l":"Action(ResourceType>, String)","u":"%3Cinit%3E(io.kroxylicious.authorizer.service.ResourceType,java.lang.String)"},{"p":"io.kroxylicious.proxy.filter.metadata","c":"TopicNameMapping","l":"allFailures()"},{"p":"io.kroxylicious.authorizer.service","c":"Decision","l":"ALLOW"},{"p":"io.kroxylicious.proxy.config.tls","c":"AllowDeny","l":"AllowDeny(List, 
Set)","u":"%3Cinit%3E(java.util.List,java.util.Set)"},{"p":"io.kroxylicious.authorizer.service","c":"AuthorizeResult","l":"allowed()"},{"p":"io.kroxylicious.proxy.config.tls","c":"AllowDeny","l":"allowed()"},{"p":"io.kroxylicious.authorizer.service","c":"AuthorizeResult","l":"allowed(ResourceType>)","u":"allowed(io.kroxylicious.authorizer.service.ResourceType)"},{"p":"io.kroxylicious.proxy.authentication","c":"Subject","l":"allPrincipalsOfType(Class
)","u":"uniquePrincipalOfType(java.lang.Class)"},{"p":"io.kroxylicious.kms.service","c":"UnknownAliasException","l":"UnknownAliasException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"io.kroxylicious.kms.service","c":"UnknownKeyException","l":"UnknownKeyException()","u":"%3Cinit%3E()"},{"p":"io.kroxylicious.kms.service","c":"UnknownKeyException","l":"UnknownKeyException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"io.kroxylicious.proxy.plugin","c":"UnknownPluginInstanceException","l":"UnknownPluginInstanceException(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"io.kroxylicious.proxy.plugin","c":"UnknownPluginInstanceException","l":"UnknownPluginInstanceException(String, Throwable)","u":"%3Cinit%3E(java.lang.String,java.lang.Throwable)"},{"p":"io.kroxylicious.proxy.authentication","c":"User","l":"User(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"io.kroxylicious.proxy.authentication","c":"UserFactory","l":"UserFactory()","u":"%3Cinit%3E()"},{"p":"io.kroxylicious.proxy.plugin","c":"PluginImplName","l":"value()"},{"p":"io.kroxylicious.authorizer.service","c":"Decision","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"io.kroxylicious.proxy.config.tls","c":"TlsClientAuth","l":"valueOf(String)","u":"valueOf(java.lang.String)"},{"p":"io.kroxylicious.authorizer.service","c":"Decision","l":"values()"},{"p":"io.kroxylicious.proxy.config.tls","c":"TlsClientAuth","l":"values()"},{"p":"io.kroxylicious.proxy.config.tls","c":"TrustProviderVisitor","l":"visit(InsecureTls)","u":"visit(io.kroxylicious.proxy.config.tls.InsecureTls)"},{"p":"io.kroxylicious.proxy.config.tls","c":"KeyProviderVisitor","l":"visit(KeyPair)","u":"visit(io.kroxylicious.proxy.config.tls.KeyPair)"},{"p":"io.kroxylicious.proxy.config.tls","c":"KeyProviderVisitor","l":"visit(KeyStore)","u":"visit(io.kroxylicious.proxy.config.tls.KeyStore)"},{"p":"io.kroxylicious.proxy.config.tls","c":"TrustProviderVisitor","l":"visit(PlatformTrustProvider)","u":"visit(io.kroxylicious.proxy.config.
tls.PlatformTrustProvider)"},{"p":"io.kroxylicious.proxy.config.tls","c":"TrustProviderVisitor","l":"visit(TrustStore)","u":"visit(io.kroxylicious.proxy.config.tls.TrustStore)"},{"p":"io.kroxylicious.proxy.filter.filterresultbuilder","c":"CloseStage","l":"withCloseConnection()"}];updateSearchResults();
\ No newline at end of file
diff --git a/documentation/0.21.0/javadoc/module-search-index.js b/documentation/0.21.0/javadoc/module-search-index.js
new file mode 100644
index 00000000..0d59754f
--- /dev/null
+++ b/documentation/0.21.0/javadoc/module-search-index.js
@@ -0,0 +1 @@
+moduleSearchIndex = [];updateSearchResults();
\ No newline at end of file
diff --git a/documentation/0.21.0/javadoc/overview-summary.html b/documentation/0.21.0/javadoc/overview-summary.html
new file mode 100644
index 00000000..861082b8
--- /dev/null
+++ b/documentation/0.21.0/javadoc/overview-summary.html
@@ -0,0 +1,25 @@
+
+
+
+
+Kroxylicious API Javadoc 0.21.0 API
+
+
+
+
+
+
+
+
+
+
+
+
+
The help page provides an introduction to the scope and syntax of JavaDoc search.
+
You can use the <ctrl> or <cmd> keys in combination with the left and right arrow keys to switch between result tabs in this page.
+
The URL template below may be used to configure this page as a search engine in browsers that support this feature. It has been tested to work in Google Chrome and Mozilla Firefox. Note that other browsers may not support this feature or require a different URL format.
+link
+
+
+
+
+
Loading search index...
+
+
+
+
+
+
+
+
+
+
diff --git a/documentation/0.21.0/javadoc/search.js b/documentation/0.21.0/javadoc/search.js
new file mode 100644
index 00000000..d3986705
--- /dev/null
+++ b/documentation/0.21.0/javadoc/search.js
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+"use strict";
+const messages = {
+ enterTerm: "Enter a search term",
+ noResult: "No results found",
+ oneResult: "Found one result",
+ manyResults: "Found {0} results",
+ loading: "Loading search index...",
+ searching: "Searching...",
+ redirecting: "Redirecting to first result...",
+ linkIcon: "Link icon",
+ linkToSection: "Link to this section"
+}
+const categories = {
+ modules: "Modules",
+ packages: "Packages",
+ types: "Classes and Interfaces",
+ members: "Members",
+ searchTags: "Search Tags"
+};
+const highlight = "$&";
+const NO_MATCH = {};
+const MAX_RESULTS = 300;
+function checkUnnamed(name, separator) {
+ return name === "" || !name ? "" : name + separator;
+}
+function escapeHtml(str) {
+ return str.replace(//g, ">");
+}
+function getHighlightedText(str, boundaries, from, to) {
+ var start = from;
+ var text = "";
+ for (var i = 0; i < boundaries.length; i += 2) {
+ var b0 = boundaries[i];
+ var b1 = boundaries[i + 1];
+ if (b0 >= to || b1 <= from) {
+ continue;
+ }
+ text += escapeHtml(str.slice(start, Math.max(start, b0)));
+ text += "";
+ text += escapeHtml(str.slice(Math.max(start, b0), Math.min(to, b1)));
+ text += "";
+ start = Math.min(to, b1);
+ }
+ text += escapeHtml(str.slice(start, to));
+ return text;
+}
+function getURLPrefix(item, category) {
+ var urlPrefix = "";
+ var slash = "/";
+ if (category === "modules") {
+ return item.l + slash;
+ } else if (category === "packages" && item.m) {
+ return item.m + slash;
+ } else if (category === "types" || category === "members") {
+ if (item.m) {
+ urlPrefix = item.m + slash;
+ } else {
+ $.each(packageSearchIndex, function(index, it) {
+ if (it.m && item.p === it.l) {
+ urlPrefix = it.m + slash;
+ }
+ });
+ }
+ }
+ return urlPrefix;
+}
+function getURL(item, category) {
+ if (item.url) {
+ return item.url;
+ }
+ var url = getURLPrefix(item, category);
+ if (category === "modules") {
+ url += "module-summary.html";
+ } else if (category === "packages") {
+ if (item.u) {
+ url = item.u;
+ } else {
+ url += item.l.replace(/\./g, '/') + "/package-summary.html";
+ }
+ } else if (category === "types") {
+ if (item.u) {
+ url = item.u;
+ } else {
+ url += checkUnnamed(item.p, "/").replace(/\./g, '/') + item.l + ".html";
+ }
+ } else if (category === "members") {
+ url += checkUnnamed(item.p, "/").replace(/\./g, '/') + item.c + ".html" + "#";
+ if (item.u) {
+ url += item.u;
+ } else {
+ url += item.l;
+ }
+ } else if (category === "searchTags") {
+ url += item.u;
+ }
+ item.url = url;
+ return url;
+}
+function createMatcher(term, camelCase) {
+ if (camelCase && !isUpperCase(term)) {
+ return null; // no need for camel-case matcher for lower case query
+ }
+ var pattern = "";
+ var upperCase = [];
+ term.trim().split(/\s+/).forEach(function(w, index, array) {
+ var tokens = w.split(/(?=[A-Z,.()<>?[\/])/);
+ for (var i = 0; i < tokens.length; i++) {
+ var s = tokens[i];
+ // ',' and '?' are the only delimiters commonly followed by space in java signatures
+ pattern += "(" + $.ui.autocomplete.escapeRegex(s).replace(/[,?]/g, "$&\\s*?") + ")";
+ upperCase.push(false);
+ var isWordToken = /\w$/.test(s);
+ if (isWordToken) {
+ if (i === tokens.length - 1 && index < array.length - 1) {
+ // space in query string matches all delimiters
+ pattern += "(.*?)";
+ upperCase.push(isUpperCase(s[0]));
+ } else {
+ if (!camelCase && isUpperCase(s) && s.length === 1) {
+ pattern += "()";
+ } else {
+ pattern += "([a-z0-9$<>?[\\]]*?)";
+ }
+ upperCase.push(isUpperCase(s[0]));
+ }
+ } else {
+ pattern += "()";
+ upperCase.push(false);
+ }
+ }
+ });
+ var re = new RegExp(pattern, "gi");
+ re.upperCase = upperCase;
+ return re;
+}
+function findMatch(matcher, input, startOfName, endOfName) {
+ var from = startOfName;
+ matcher.lastIndex = from;
+ var match = matcher.exec(input);
+ // Expand search area until we get a valid result or reach the beginning of the string
+ while (!match || match.index + match[0].length < startOfName || endOfName < match.index) {
+ if (from === 0) {
+ return NO_MATCH;
+ }
+ from = input.lastIndexOf(".", from - 2) + 1;
+ matcher.lastIndex = from;
+ match = matcher.exec(input);
+ }
+ var boundaries = [];
+ var matchEnd = match.index + match[0].length;
+ var score = 5;
+ var start = match.index;
+ var prevEnd = -1;
+ for (var i = 1; i < match.length; i += 2) {
+ var isUpper = isUpperCase(input[start]);
+ var isMatcherUpper = matcher.upperCase[i];
+ // capturing groups come in pairs, match and non-match
+ boundaries.push(start, start + match[i].length);
+ // make sure groups are anchored on a left word boundary
+ var prevChar = input[start - 1] || "";
+ var nextChar = input[start + 1] || "";
+ if (start !== 0 && !/[\W_]/.test(prevChar) && !/[\W_]/.test(input[start])) {
+ if (isUpper && (isLowerCase(prevChar) || isLowerCase(nextChar))) {
+ score -= 0.1;
+ } else if (isMatcherUpper && start === prevEnd) {
+ score -= isUpper ? 0.1 : 1.0;
+ } else {
+ return NO_MATCH;
+ }
+ }
+ prevEnd = start + match[i].length;
+ start += match[i].length + match[i + 1].length;
+
+ // lower score for parts of the name that are missing
+ if (match[i + 1] && prevEnd < endOfName) {
+ score -= rateNoise(match[i + 1]);
+ }
+ }
+ // lower score if a type name contains unmatched camel-case parts
+ if (input[matchEnd - 1] !== "." && endOfName > matchEnd)
+ score -= rateNoise(input.slice(matchEnd, endOfName));
+ score -= rateNoise(input.slice(0, Math.max(startOfName, match.index)));
+
+ if (score <= 0) {
+ return NO_MATCH;
+ }
+ return {
+ input: input,
+ score: score,
+ boundaries: boundaries
+ };
+}
+function isUpperCase(s) {
+ return s !== s.toLowerCase();
+}
+function isLowerCase(s) {
+ return s !== s.toUpperCase();
+}
+function rateNoise(str) {
+ return (str.match(/([.(])/g) || []).length / 5
+ + (str.match(/([A-Z]+)/g) || []).length / 10
+ + str.length / 20;
+}
+function doSearch(request, response) {
+ var term = request.term.trim();
+ var maxResults = request.maxResults || MAX_RESULTS;
+ if (term.length === 0) {
+ return this.close();
+ }
+ var matcher = {
+ plainMatcher: createMatcher(term, false),
+ camelCaseMatcher: createMatcher(term, true)
+ }
+ var indexLoaded = indexFilesLoaded();
+
+ function getPrefix(item, category) {
+ switch (category) {
+ case "packages":
+ return checkUnnamed(item.m, "/");
+ case "types":
+ return checkUnnamed(item.p, ".");
+ case "members":
+ return checkUnnamed(item.p, ".") + item.c + ".";
+ default:
+ return "";
+ }
+ }
+ function useQualifiedName(category) {
+ switch (category) {
+ case "packages":
+ return /[\s/]/.test(term);
+ case "types":
+ case "members":
+ return /[\s.]/.test(term);
+ default:
+ return false;
+ }
+ }
+ function searchIndex(indexArray, category) {
+ var matches = [];
+ if (!indexArray) {
+ if (!indexLoaded) {
+ matches.push({ l: messages.loading, category: category });
+ }
+ return matches;
+ }
+ $.each(indexArray, function (i, item) {
+ var prefix = getPrefix(item, category);
+ var simpleName = item.l;
+ var qualifiedName = prefix + simpleName;
+ var useQualified = useQualifiedName(category);
+ var input = useQualified ? qualifiedName : simpleName;
+ var startOfName = useQualified ? prefix.length : 0;
+ var endOfName = category === "members" && input.indexOf("(", startOfName) > -1
+ ? input.indexOf("(", startOfName) : input.length;
+ var m = findMatch(matcher.plainMatcher, input, startOfName, endOfName);
+ if (m === NO_MATCH && matcher.camelCaseMatcher) {
+ m = findMatch(matcher.camelCaseMatcher, input, startOfName, endOfName);
+ }
+ if (m !== NO_MATCH) {
+ m.indexItem = item;
+ m.prefix = prefix;
+ m.category = category;
+ if (!useQualified) {
+ m.input = qualifiedName;
+ m.boundaries = m.boundaries.map(function(b) {
+ return b + prefix.length;
+ });
+ }
+ matches.push(m);
+ }
+ return true;
+ });
+ return matches.sort(function(e1, e2) {
+ return e2.score - e1.score;
+ }).slice(0, maxResults);
+ }
+
+ var result = searchIndex(moduleSearchIndex, "modules")
+ .concat(searchIndex(packageSearchIndex, "packages"))
+ .concat(searchIndex(typeSearchIndex, "types"))
+ .concat(searchIndex(memberSearchIndex, "members"))
+ .concat(searchIndex(tagSearchIndex, "searchTags"));
+
+ if (!indexLoaded) {
+ updateSearchResults = function() {
+ doSearch(request, response);
+ }
+ } else {
+ updateSearchResults = function() {};
+ }
+ response(result);
+}
+// JQuery search menu implementation
+$.widget("custom.catcomplete", $.ui.autocomplete, {
+ _create: function() {
+ this._super();
+ this.widget().menu("option", "items", "> .result-item");
+ // workaround for search result scrolling
+ this.menu._scrollIntoView = function _scrollIntoView( item ) {
+ var borderTop, paddingTop, offset, scroll, elementHeight, itemHeight;
+ if ( this._hasScroll() ) {
+ borderTop = parseFloat( $.css( this.activeMenu[ 0 ], "borderTopWidth" ) ) || 0;
+ paddingTop = parseFloat( $.css( this.activeMenu[ 0 ], "paddingTop" ) ) || 0;
+ offset = item.offset().top - this.activeMenu.offset().top - borderTop - paddingTop;
+ scroll = this.activeMenu.scrollTop();
+ elementHeight = this.activeMenu.height() - 26;
+ itemHeight = item.outerHeight();
+
+ if ( offset < 0 ) {
+ this.activeMenu.scrollTop( scroll + offset );
+ } else if ( offset + itemHeight > elementHeight ) {
+ this.activeMenu.scrollTop( scroll + offset - elementHeight + itemHeight );
+ }
+ }
+ };
+ },
+ _renderMenu: function(ul, items) {
+ var currentCategory = "";
+ var widget = this;
+ widget.menu.bindings = $();
+ $.each(items, function(index, item) {
+ if (item.category && item.category !== currentCategory) {
+ ul.append("
RequestFilter.onRequest(ApiKeys, short, RequestHeaderData, ApiMessage, FilterContext)instead.