From 8b34c446a02609d3b23f85dd4c61c042d3dd7cdd Mon Sep 17 00:00:00 2001 From: Kirk True Date: Thu, 26 Feb 2026 17:41:35 -0800 Subject: [PATCH 01/10] Started on work --- .../GetConfigSubscriptionRequest.java | 86 +++++++++++++++++++ .../GetConfigSubscriptionResponse.java | 74 ++++++++++++++++ .../message/GetConfigSubscriptionRequest.json | 16 ++++ .../GetConfigSubscriptionResponse.json | 17 ++++ .../common/message/PushConfigRequest.json | 45 ++++++++++ .../common/message/PushConfigResponse.json | 42 +++++++++ 6 files changed, 280 insertions(+) create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionResponse.java create mode 100644 clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json create mode 100644 clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json create mode 100644 clients/src/main/resources/common/message/PushConfigRequest.json create mode 100644 clients/src/main/resources/common/message/PushConfigResponse.json diff --git a/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java new file mode 100644 index 0000000000000..8f5c367b786b2 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.GetConfigSubscriptionRequestData; +import org.apache.kafka.common.message.GetConfigSubscriptionResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.utils.AppInfoParser; + +public class GetConfigSubscriptionRequest extends AbstractRequest { + + public static class Builder extends AbstractRequest.Builder { + private static final String DEFAULT_CLIENT_SOFTWARE_NAME = "apache-kafka-java"; + private static final String DEFAULT_CLIENT_SOFTWARE_ROLE_CONSUMER = "consumer"; + + public static GetConfigSubscriptionRequest.Builder forConsumer(short maxVersion) { + GetConfigSubscriptionRequestData requestData = new GetConfigSubscriptionRequestData() + .setClientSoftwareName(DEFAULT_CLIENT_SOFTWARE_NAME) + .setClientSoftwareVersion(AppInfoParser.getVersion()) + .setClientSoftwareRole(DEFAULT_CLIENT_SOFTWARE_ROLE_CONSUMER); + return new GetConfigSubscriptionRequest.Builder(requestData); + } + + private final GetConfigSubscriptionRequestData data; + + public Builder(GetConfigSubscriptionRequestData data) { + this(data, false); + } + + public Builder(GetConfigSubscriptionRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.GET_CONFIG_SUBSCRIPTION, enableUnstableLastVersion); + this.data = data; + } + + @Override + public GetConfigSubscriptionRequest build(short version) { + return new GetConfigSubscriptionRequest(data, 
version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final GetConfigSubscriptionRequestData data; + + public GetConfigSubscriptionRequest(GetConfigSubscriptionRequestData data, short version) { + super(ApiKeys.GET_CONFIG_SUBSCRIPTION, version); + this.data = data; + } + + @Override + public GetConfigSubscriptionResponse getErrorResponse(int throttleTimeMs, Throwable e) { + GetConfigSubscriptionResponseData responseData = new GetConfigSubscriptionResponseData() + .setErrorCode(Errors.forException(e).code()) + .setThrottleTimeMs(throttleTimeMs); + return new GetConfigSubscriptionResponse(responseData); + } + + @Override + public GetConfigSubscriptionRequestData data() { + return data; + } + + public static GetConfigSubscriptionRequest parse(Readable readable, short version) { + return new GetConfigSubscriptionRequest(new GetConfigSubscriptionRequestData( + readable, version), version); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionResponse.java new file mode 100644 index 0000000000000..44499bdd433bc --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionResponse.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.GetConfigSubscriptionResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; + +import java.util.EnumMap; +import java.util.Map; + +/** + * Possible error codes: + * - {@link Errors#UNSUPPORTED_VERSION} + * - {@link Errors#INVALID_REQUEST} + */ +public class GetConfigSubscriptionResponse extends AbstractResponse { + private final GetConfigSubscriptionResponseData data; + + public GetConfigSubscriptionResponse(GetConfigSubscriptionResponseData data) { + super(ApiKeys.GET_CONFIG_SUBSCRIPTION); + this.data = data; + } + + @Override + public GetConfigSubscriptionResponseData data() { + return data; + } + + @Override + public Map errorCounts() { + Map counts = new EnumMap<>(Errors.class); + updateErrorCounts(counts, Errors.forCode(data.errorCode())); + return counts; + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public boolean hasError() { + return error() != Errors.NONE; + } + + public Errors error() { + return Errors.forCode(data.errorCode()); + } + + public static GetConfigSubscriptionResponse parse(Readable readable, short version) { + return new GetConfigSubscriptionResponse(new GetConfigSubscriptionResponseData( + readable, version)); + } +} diff --git 
a/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json b/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json new file mode 100644 index 0000000000000..938635b9ce7ec --- /dev/null +++ b/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json @@ -0,0 +1,16 @@ +{ + "apiKey": 93, + "type": "request", + "listeners": ["broker"], + "name": "GetConfigSubscriptionRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ClientSoftwareName", "type": "string", "versions": "0+", + "ignorable": false, "about": "The name of the client."}, + { "name": "ClientSoftwareVersion", "type": "string", "versions": "0+", + "ignorable": false, "about": "The version of the client."}, + { "name": "ClientSoftwareRole", "type": "string", "versions": "0+", + "ignorable": true, "about": "The role the client is serving."} + ] +} diff --git a/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json b/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json new file mode 100644 index 0000000000000..a7dcb5dee7eba --- /dev/null +++ b/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json @@ -0,0 +1,17 @@ +{ + "apiKey": 93, + "type": "response", + "name": "GetConfigSubscriptionResponse", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The top-level error code."}, + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."}, + { "name": "SubscriptionId", "type": "int32", "versions": "0+", + "about": "Unique identifier for the current subscription set for this client instance."}, + { "name": "ConfigNames", "type": "[]string", "versions": "0+", + "about": "The client configuration 
names the server wants the client to send."} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/PushConfigRequest.json b/clients/src/main/resources/common/message/PushConfigRequest.json new file mode 100644 index 0000000000000..f88bb291d1a72 --- /dev/null +++ b/clients/src/main/resources/common/message/PushConfigRequest.json @@ -0,0 +1,45 @@ +{ + "apiKey": 94, + "type": "request", + "listeners": ["broker"], + "name": "PushConfigRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { + "name": "SubscriptionId", + "type": "int32", + "versions": "0+", + "about": "Unique identifier for the current subscription." + }, + { + "name": "Configs", + "type": "[]Config", + "versions": "0+", + "ignorable": false, + "about": "The client configuration entries.", + "fields": [ + { + "name": "ConfigKey", + "type": "string", + "versions": "0+", + "about": "The configuration key." + }, + { + "name": "ConfigValue", + "type": "string", + "versions": "0+", + "default": "", + "about": "The configuration value." + }, + { + "name": "ConfigType", + "type": "int16", + "versions": "0+", + "default": "-1", + "about": "Type (from ConfigDef.Type) of the ConfigValue field." + } + ] + } + ] +} diff --git a/clients/src/main/resources/common/message/PushConfigResponse.json b/clients/src/main/resources/common/message/PushConfigResponse.json new file mode 100644 index 0000000000000..8a57c88cf7398 --- /dev/null +++ b/clients/src/main/resources/common/message/PushConfigResponse.json @@ -0,0 +1,42 @@ +{ + "apiKey": 94, + "type": "response", + "name": "PushConfigResponse", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { + "name": "ThrottleTimeMs", + "type": "int32", + "versions": "0+", + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." 
+ }, + { + "name": "ErrorCode", + "type": "int16", + "versions": "0+", + "about": "The error code, or 0 if there was no error." + }, + { + "name": "ConfigErrors", + "type": "[]ConfigError", + "versions": "0+", + "ignorable": false, + "about": "The client configuration enforcement errors.", + "fields": [ + { + "name": "ConfigKey", + "type": "string", + "versions": "0+", + "about": "The configuration key." + }, + { + "name": "ConfigErrorDescription", + "type": "string", + "versions": "0+", + "about": "Human-readable description of the validation/enforcement error." + } + ] + } + ] +} \ No newline at end of file From 80b681e04b5395c145098a01e19bc578c25415ab Mon Sep 17 00:00:00 2001 From: Kirk True Date: Tue, 24 Mar 2026 14:31:22 -0700 Subject: [PATCH 02/10] wip --- .../errors/ConfigTooLargeException.java | 33 ++++++++ .../UnknownConfigSubscriptionIdException.java | 35 +++++++++ .../apache/kafka/common/protocol/ApiKeys.java | 4 +- .../apache/kafka/common/protocol/Errors.java | 4 +- .../common/requests/AbstractResponse.java | 4 + .../GetConfigSubscriptionRequest.java | 18 ++--- .../common/requests/PushConfigRequest.java | 73 ++++++++++++++++++ .../common/requests/PushConfigResponse.java | 76 +++++++++++++++++++ .../message/GetConfigSubscriptionRequest.json | 10 +-- .../GetConfigSubscriptionResponse.json | 38 ++++++++-- .../common/message/PushConfigRequest.json | 35 ++++----- .../common/message/PushConfigResponse.json | 31 +------- 12 files changed, 282 insertions(+), 79 deletions(-) create mode 100644 clients/src/main/java/org/apache/kafka/common/errors/ConfigTooLargeException.java create mode 100644 clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigSubscriptionIdException.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/PushConfigRequest.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java diff --git 
a/clients/src/main/java/org/apache/kafka/common/errors/ConfigTooLargeException.java b/clients/src/main/java/org/apache/kafka/common/errors/ConfigTooLargeException.java new file mode 100644 index 0000000000000..3982eaf95d9a4 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/ConfigTooLargeException.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.errors; + +/** + * Exception thrown when a client configuration payload exceeds the broker's ConfigMaxBytes limit. 
+ */ +public class ConfigTooLargeException extends ApiException { + + private static final long serialVersionUID = 1L; + + public ConfigTooLargeException(String message) { + super(message); + } + + public ConfigTooLargeException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigSubscriptionIdException.java b/clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigSubscriptionIdException.java new file mode 100644 index 0000000000000..23025a548d04b --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigSubscriptionIdException.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.errors; + +/** + * Exception thrown when a client sends a configuration push request with an unknown or expired subscription ID. + * This typically happens when the broker's configuration subscription has changed between the time the client + * received the subscription and when it attempted to push its configuration. 
+ */ +public class UnknownConfigSubscriptionIdException extends RetriableException { + + private static final long serialVersionUID = 1L; + + public UnknownConfigSubscriptionIdException(String message) { + super(message); + } + + public UnknownConfigSubscriptionIdException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index 79b283b4f8d89..a42c64196c00e 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -137,7 +137,9 @@ public enum ApiKeys { STREAMS_GROUP_DESCRIBE(ApiMessageType.STREAMS_GROUP_DESCRIBE), DESCRIBE_SHARE_GROUP_OFFSETS(ApiMessageType.DESCRIBE_SHARE_GROUP_OFFSETS), ALTER_SHARE_GROUP_OFFSETS(ApiMessageType.ALTER_SHARE_GROUP_OFFSETS), - DELETE_SHARE_GROUP_OFFSETS(ApiMessageType.DELETE_SHARE_GROUP_OFFSETS); + DELETE_SHARE_GROUP_OFFSETS(ApiMessageType.DELETE_SHARE_GROUP_OFFSETS), + GET_CONFIG_SUBSCRIPTION(ApiMessageType.GET_CONFIG_SUBSCRIPTION), + PUSH_CONFIG(ApiMessageType.PUSH_CONFIG); private static final Map> APIS_BY_LISTENER = new EnumMap<>(ApiMessageType.ListenerType.class); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index a27a7fcf23c77..1949b0ff8d815 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -418,7 +418,9 @@ public enum Errors { STREAMS_INVALID_TOPOLOGY(130, "The supplied topology is invalid.", StreamsInvalidTopologyException::new), STREAMS_INVALID_TOPOLOGY_EPOCH(131, "The supplied topology epoch is invalid.", StreamsInvalidTopologyEpochException::new), STREAMS_TOPOLOGY_FENCED(132, "The supplied topology epoch is outdated.", StreamsTopologyFencedException::new), 
- SHARE_SESSION_LIMIT_REACHED(133, "The limit of share sessions has been reached.", ShareSessionLimitReachedException::new); + SHARE_SESSION_LIMIT_REACHED(133, "The limit of share sessions has been reached.", ShareSessionLimitReachedException::new), + CONFIG_TOO_LARGE(134, "Configuration payload exceeds broker's ConfigMaxBytes limit.", ConfigTooLargeException::new), + UNKNOWN_CONFIG_SUBSCRIPTION_ID(135, "Unknown or expired config subscription ID.", UnknownConfigSubscriptionIdException::new); private static final Logger log = LoggerFactory.getLogger(Errors.class); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java index bc313078d7424..e5e2b5cdbfe8e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java @@ -291,6 +291,10 @@ public static AbstractResponse parseResponse(ApiKeys apiKey, Readable readable, return AlterShareGroupOffsetsResponse.parse(readable, version); case DELETE_SHARE_GROUP_OFFSETS: return DeleteShareGroupOffsetsResponse.parse(readable, version); + case GET_CONFIG_SUBSCRIPTION: + return GetConfigSubscriptionResponse.parse(readable, version); + case PUSH_CONFIG: + return PushConfigResponse.parse(readable, version); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseResponse`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java index 8f5c367b786b2..bd032966a858b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java @@ -16,27 +16,16 @@ */ package 
org.apache.kafka.common.requests; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.GetConfigSubscriptionRequestData; import org.apache.kafka.common.message.GetConfigSubscriptionResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.Readable; -import org.apache.kafka.common.utils.AppInfoParser; public class GetConfigSubscriptionRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { - private static final String DEFAULT_CLIENT_SOFTWARE_NAME = "apache-kafka-java"; - private static final String DEFAULT_CLIENT_SOFTWARE_ROLE_CONSUMER = "consumer"; - - public static GetConfigSubscriptionRequest.Builder forConsumer(short maxVersion) { - GetConfigSubscriptionRequestData requestData = new GetConfigSubscriptionRequestData() - .setClientSoftwareName(DEFAULT_CLIENT_SOFTWARE_NAME) - .setClientSoftwareVersion(AppInfoParser.getVersion()) - .setClientSoftwareRole(DEFAULT_CLIENT_SOFTWARE_ROLE_CONSUMER); - return new GetConfigSubscriptionRequest.Builder(requestData); - } - private final GetConfigSubscriptionRequestData data; public Builder(GetConfigSubscriptionRequestData data) { @@ -70,7 +59,10 @@ public GetConfigSubscriptionRequest(GetConfigSubscriptionRequestData data, short public GetConfigSubscriptionResponse getErrorResponse(int throttleTimeMs, Throwable e) { GetConfigSubscriptionResponseData responseData = new GetConfigSubscriptionResponseData() .setErrorCode(Errors.forException(e).code()) - .setThrottleTimeMs(throttleTimeMs); + .setThrottleTimeMs(throttleTimeMs) + .setClientInstanceId(Uuid.ZERO_UUID) + .setSubscriptionId(-1) + .setConfigMaxBytes(0); return new GetConfigSubscriptionResponse(responseData); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/PushConfigRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/PushConfigRequest.java new file mode 100644 index 
0000000000000..d060765949ca6 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/PushConfigRequest.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.PushConfigRequestData; +import org.apache.kafka.common.message.PushConfigResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; + +public class PushConfigRequest extends AbstractRequest { + + public static class Builder extends AbstractRequest.Builder { + private final PushConfigRequestData data; + + public Builder(PushConfigRequestData data) { + this(data, false); + } + + public Builder(PushConfigRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.PUSH_CONFIG, enableUnstableLastVersion); + this.data = data; + } + + @Override + public PushConfigRequest build(short version) { + return new PushConfigRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final PushConfigRequestData data; + + public PushConfigRequest(PushConfigRequestData data, short version) { 
+ super(ApiKeys.PUSH_CONFIG, version); + this.data = data; + } + + @Override + public PushConfigResponse getErrorResponse(int throttleTimeMs, Throwable e) { + PushConfigResponseData responseData = new PushConfigResponseData() + .setErrorCode(Errors.forException(e).code()) + .setThrottleTimeMs(throttleTimeMs); + return new PushConfigResponse(responseData); + } + + @Override + public PushConfigRequestData data() { + return data; + } + + public static PushConfigRequest parse(Readable readable, short version) { + return new PushConfigRequest(new PushConfigRequestData(readable, version), version); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java new file mode 100644 index 0000000000000..379e5c0c17fa7 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.PushConfigResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; + +import java.util.EnumMap; +import java.util.Map; + +/** + * Possible error codes: + * - {@link Errors#CONFIG_TOO_LARGE} + * - {@link Errors#INVALID_CONFIG} + * - {@link Errors#UNKNOWN_CONFIG_SUBSCRIPTION_ID} + * - {@link Errors#UNSUPPORTED_VERSION} + * - {@link Errors#INVALID_REQUEST} + */ +public class PushConfigResponse extends AbstractResponse { + private final PushConfigResponseData data; + + public PushConfigResponse(PushConfigResponseData data) { + super(ApiKeys.PUSH_CONFIG); + this.data = data; + } + + @Override + public PushConfigResponseData data() { + return data; + } + + @Override + public Map errorCounts() { + Map counts = new EnumMap<>(Errors.class); + updateErrorCounts(counts, Errors.forCode(data.errorCode())); + return counts; + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public boolean hasError() { + return error() != Errors.NONE; + } + + public Errors error() { + return Errors.forCode(data.errorCode()); + } + + public static PushConfigResponse parse(Readable readable, short version) { + return new PushConfigResponse(new PushConfigResponseData(readable, version)); + } +} diff --git a/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json b/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json index 938635b9ce7ec..84050fd6a843e 100644 --- a/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json +++ b/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json @@ -6,11 +6,9 @@ "validVersions": "0", "flexibleVersions": "0+", "fields": [ - { "name": 
"ClientSoftwareName", "type": "string", "versions": "0+", - "ignorable": false, "about": "The name of the client."}, - { "name": "ClientSoftwareVersion", "type": "string", "versions": "0+", - "ignorable": false, "about": "The version of the client."}, - { "name": "ClientSoftwareRole", "type": "string", "versions": "0+", - "ignorable": true, "about": "The role the client is serving."} + { + "name": "ClientInstanceId", "type": "uuid", "versions": "0+", + "about": "Unique id for this client instance, must be set to all zeros on the first request." + } ] } diff --git a/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json b/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json index a7dcb5dee7eba..0c7a74f34db4c 100644 --- a/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json +++ b/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json @@ -5,13 +5,35 @@ "validVersions": "0", "flexibleVersions": "0+", "fields": [ - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The top-level error code."}, - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."}, - { "name": "SubscriptionId", "type": "int32", "versions": "0+", - "about": "Unique identifier for the current subscription set for this client instance."}, - { "name": "ConfigNames", "type": "[]string", "versions": "0+", - "about": "The client configuration names the server wants the client to send."} + { + "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + "about": "The duration in milliseconds for which the request was throttled." + }, + { + "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The error code, or 0 if there was no error." 
+ }, + { + "name": "ClientInstanceId", "type": "uuid", "versions": "0+", + "about": "Assigned client instance id if request ClientInstanceId was all zeros, else echoes request value." + }, + { + "name": "SubscriptionId", "type": "int32", "versions": "0+", + "about": "Unique identifier for the current config subscription." + }, + { + "name": "ConfigMaxBytes", "type": "int32", "versions": "0+", + "about": "The maximum bytes of config data the broker accepts in PushConfigRequest." + }, + { + "name": "RequestedKeys", "type": "[]ConfigKey", "versions": "0+", + "about": "The config keys the broker wants to receive.", + "fields": [ + { + "name": "Name", "type": "string", "versions": "0+", + "about": "The config key name. May be '*' for all keys." + } + ] + } ] } \ No newline at end of file diff --git a/clients/src/main/resources/common/message/PushConfigRequest.json b/clients/src/main/resources/common/message/PushConfigRequest.json index f88bb291d1a72..05e8601105eea 100644 --- a/clients/src/main/resources/common/message/PushConfigRequest.json +++ b/clients/src/main/resources/common/message/PushConfigRequest.json @@ -7,37 +7,28 @@ "flexibleVersions": "0+", "fields": [ { - "name": "SubscriptionId", - "type": "int32", - "versions": "0+", - "about": "Unique identifier for the current subscription." + "name": "ClientInstanceId", "type": "uuid", "versions": "0+", + "about": "Unique id for this client instance." }, { - "name": "Configs", - "type": "[]Config", - "versions": "0+", - "ignorable": false, + "name": "SubscriptionId", "type": "int32", "versions": "0+", + "about": "The subscription ID from GetConfigSubscriptionResponse." + }, + { + "name": "Configs", "type": "[]ClientConfig", "versions": "0+", "about": "The client configuration entries.", "fields": [ { - "name": "ConfigKey", - "type": "string", - "versions": "0+", - "about": "The configuration key." + "name": "Name", "type": "string", "versions": "0+", + "about": "The configuration key name." 
}, { - "name": "ConfigValue", - "type": "string", - "versions": "0+", - "default": "", - "about": "The configuration value." + "name": "Value", "type": "string", "versions": "0+", + "about": "The configuration value as a string." }, { - "name": "ConfigType", - "type": "int16", - "versions": "0+", - "default": "-1", - "about": "Type (from ConfigDef.Type) of the ConfigValue field." + "name": "Type", "type": "int8", "versions": "0+", + "about": "The configuration type: 0=BOOLEAN, 1=STRING, 2=INT, 3=SHORT, 4=LONG, 5=DOUBLE, 6=LIST, 7=CLASS." } ] } diff --git a/clients/src/main/resources/common/message/PushConfigResponse.json b/clients/src/main/resources/common/message/PushConfigResponse.json index 8a57c88cf7398..c054a492d44b5 100644 --- a/clients/src/main/resources/common/message/PushConfigResponse.json +++ b/clients/src/main/resources/common/message/PushConfigResponse.json @@ -6,37 +6,12 @@ "flexibleVersions": "0+", "fields": [ { - "name": "ThrottleTimeMs", - "type": "int32", - "versions": "0+", - "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." + "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + "about": "The duration in milliseconds for which the request was throttled." }, { - "name": "ErrorCode", - "type": "int16", - "versions": "0+", + "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The error code, or 0 if there was no error." - }, - { - "name": "ConfigErrors", - "type": "[]ConfigError", - "versions": "0+", - "ignorable": false, - "about": "The client configuration enforcement errors.", - "fields": [ - { - "name": "ConfigKey", - "type": "string", - "versions": "0+", - "about": "The configuration key." - }, - { - "name": "ConfigErrorDescription", - "type": "string", - "versions": "0+", - "about": "Human-readable description of the validation/enforcement error." 
- } - ] } ] } \ No newline at end of file From 57e8a7e6a0a3deef3e45b5c7267267861ec308c4 Mon Sep 17 00:00:00 2001 From: Kirk True Date: Tue, 24 Mar 2026 15:16:19 -0700 Subject: [PATCH 03/10] WIP for client --- .../org/apache/kafka/clients/ClientUtils.java | 4 +- .../apache/kafka/clients/NetworkClient.java | 265 +++++++++++++++++- .../clients/consumer/ConsumerConfig.java | 14 + .../apache/kafka/common/protocol/Errors.java | 2 + .../message/GetConfigSubscriptionRequest.json | 15 + .../GetConfigSubscriptionResponse.json | 15 + .../common/message/PushConfigRequest.json | 15 + .../common/message/PushConfigResponse.json | 15 + .../kafka/clients/NetworkClientTest.java | 8 +- 9 files changed, 342 insertions(+), 11 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index c295713df2370..a08e333f8fd9e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -248,7 +248,9 @@ public static NetworkClient createNetworkClient(AbstractConfig config, hostResolver, clientTelemetrySender, config.getLong(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG), - MetadataRecoveryStrategy.forName(config.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)) + MetadataRecoveryStrategy.forName(config.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)), + false, + null ); } catch (Throwable t) { closeQuietly(selector, "Selector"); diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index 692847a8b1553..976579cb120c2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -20,10 +20,15 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; 
import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion; +import org.apache.kafka.common.message.GetConfigSubscriptionRequestData; +import org.apache.kafka.common.message.GetConfigSubscriptionResponseData; +import org.apache.kafka.common.message.PushConfigRequestData; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.network.ChannelState; import org.apache.kafka.common.network.NetworkReceive; @@ -38,9 +43,13 @@ import org.apache.kafka.common.requests.ApiVersionsRequest; import org.apache.kafka.common.requests.ApiVersionsResponse; import org.apache.kafka.common.requests.CorrelationIdMismatchException; +import org.apache.kafka.common.requests.GetConfigSubscriptionRequest; +import org.apache.kafka.common.requests.GetConfigSubscriptionResponse; import org.apache.kafka.common.requests.GetTelemetrySubscriptionsResponse; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.requests.MetadataResponse; +import org.apache.kafka.common.requests.PushConfigRequest; +import org.apache.kafka.common.requests.PushConfigResponse; import org.apache.kafka.common.requests.PushTelemetryResponse; import org.apache.kafka.common.requests.RequestHeader; import org.apache.kafka.common.security.authenticator.SaslClientAuthenticator; @@ -82,6 +91,17 @@ private enum State { CLOSED } + /** + * Config push handshake state tracking + */ + private enum ConfigPushState { + NOT_STARTED, // Haven't started handshake + SUBSCRIPTION_IN_PROGRESS, // Waiting for GetConfigSubscription response + PUSH_IN_PROGRESS, // Waiting for PushConfig response + COMPLETED, // Successfully pushed config + FAILED // 
Failed (but client continues) + } + private final Logger log; /* the selector used to perform network i/o */ @@ -139,6 +159,15 @@ private enum State { private final TelemetrySender telemetrySender; + // Config push state + private final boolean enableConfigPush; + private final AbstractConfig clientConfig; + private volatile Uuid clientInstanceId = Uuid.ZERO_UUID; + private volatile ConfigPushState configPushState = ConfigPushState.NOT_STARTED; + private volatile int configSubscriptionId = -1; + private volatile int configMaxBytes = 0; + private volatile List requestedConfigKeys = new ArrayList<>(); + public NetworkClient(Selectable selector, Metadata metadata, String clientId, @@ -155,8 +184,9 @@ public NetworkClient(Selectable selector, ApiVersions apiVersions, LogContext logContext, MetadataRecoveryStrategy metadataRecoveryStrategy) { - this(selector, + this(null, metadata, + selector, clientId, maxInFlightRequestsPerConnection, reconnectBackoffMs, @@ -169,9 +199,14 @@ public NetworkClient(Selectable selector, time, discoverBrokerVersions, apiVersions, + null, logContext, + new DefaultHostResolver(), + null, Long.MAX_VALUE, - metadataRecoveryStrategy); + metadataRecoveryStrategy, + false, + null); } public NetworkClient(Selectable selector, @@ -211,7 +246,9 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, rebootstrapTriggerMs, - metadataRecoveryStrategy); + metadataRecoveryStrategy, + false, + null); } public NetworkClient(Selectable selector, @@ -251,7 +288,9 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, Long.MAX_VALUE, - metadataRecoveryStrategy); + metadataRecoveryStrategy, + false, + null); } public NetworkClient(Selectable selector, @@ -290,7 +329,9 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, Long.MAX_VALUE, - metadataRecoveryStrategy); + metadataRecoveryStrategy, + false, + null); } public NetworkClient(MetadataUpdater metadataUpdater, @@ -313,7 +354,9 @@ public 
NetworkClient(MetadataUpdater metadataUpdater, HostResolver hostResolver, ClientTelemetrySender clientTelemetrySender, long rebootstrapTriggerMs, - MetadataRecoveryStrategy metadataRecoveryStrategy) { + MetadataRecoveryStrategy metadataRecoveryStrategy, + boolean enableConfigPush, + AbstractConfig clientConfig) { /* It would be better if we could pass `DefaultMetadataUpdater` from the public constructor, but it's not * possible because `DefaultMetadataUpdater` is an inner class and it can only be instantiated after the * super constructor is invoked. @@ -346,6 +389,8 @@ public NetworkClient(MetadataUpdater metadataUpdater, this.telemetrySender = (clientTelemetrySender != null) ? new TelemetrySender(clientTelemetrySender) : null; this.rebootstrapTriggerMs = rebootstrapTriggerMs; this.metadataRecoveryStrategy = metadataRecoveryStrategy; + this.enableConfigPush = enableConfigPush; + this.clientConfig = clientConfig; } /** @@ -430,6 +475,13 @@ private void cancelInFlightRequests(String nodeId, metadataUpdater.handleFailedRequest(now, Optional.empty()); } else if (isTelemetryApi(request.header.apiKey()) && telemetrySender != null) { telemetrySender.handleFailedRequest(request.header.apiKey(), null); + } else if (isConfigPushApi(request.header.apiKey()) && enableConfigPush) { + // Config push failed due to disconnect + if (configPushState == ConfigPushState.SUBSCRIPTION_IN_PROGRESS || + configPushState == ConfigPushState.PUSH_IN_PROGRESS) { + log.debug("Config push request failed due to disconnect"); + configPushState = ConfigPushState.FAILED; + } } } } @@ -595,6 +647,14 @@ else if (clientRequest.apiKey() == ApiKeys.METADATA) metadataUpdater.handleFailedRequest(now, Optional.of(unsupportedVersionException)); else if (isTelemetryApi(clientRequest.apiKey()) && telemetrySender != null) telemetrySender.handleFailedRequest(clientRequest.apiKey(), unsupportedVersionException); + else if (isConfigPushApi(clientRequest.apiKey()) && enableConfigPush) { + // Config push request 
failed due to unsupported version + if (configPushState == ConfigPushState.SUBSCRIPTION_IN_PROGRESS || + configPushState == ConfigPushState.PUSH_IN_PROGRESS) { + log.debug("Config push request failed: unsupported version"); + configPushState = ConfigPushState.FAILED; + } + } } } @@ -655,6 +715,7 @@ public List poll(long timeout, long now) { handleDisconnections(responses, updatedNow); handleConnections(); handleInitiateApiVersionRequests(updatedNow); + handleConfigPushHandshake(updatedNow); handleTimedOutConnections(responses, updatedNow); handleTimedOutRequests(responses, updatedNow); handleRebootstrap(responses, updatedNow); @@ -1015,6 +1076,10 @@ else if (req.isInternalRequest && response instanceof GetTelemetrySubscriptionsR telemetrySender.handleResponse((GetTelemetrySubscriptionsResponse) response); else if (req.isInternalRequest && response instanceof PushTelemetryResponse) telemetrySender.handleResponse((PushTelemetryResponse) response); + else if (req.isInternalRequest && response instanceof GetConfigSubscriptionResponse) + handleGetConfigSubscriptionResponse(req.destination, (GetConfigSubscriptionResponse) response, now); + else if (req.isInternalRequest && response instanceof PushConfigResponse) + handlePushConfigResponse(req.destination, (PushConfigResponse) response, now); else responses.add(req.completed(response, now)); } @@ -1050,6 +1115,20 @@ private void handleApiVersionsResponse(List responses, apiVersionsResponse.data().finalizedFeatures(), apiVersionsResponse.data().finalizedFeaturesEpoch()); apiVersions.update(node, nodeVersionInfo); + + // Check if we should initiate config push handshake + if (enableConfigPush && configPushState == ConfigPushState.NOT_STARTED) { + ApiVersion configSubVersion = nodeVersionInfo.apiVersion(ApiKeys.GET_CONFIG_SUBSCRIPTION); + if (configSubVersion != null) { + log.debug("Node {} supports config push (version {}), will initiate handshake", + node, configSubVersion); + // Don't send immediately - will be triggered 
in poll() + } else { + log.debug("Node {} does not support config push, skipping", node); + configPushState = ConfigPushState.FAILED; // Skip, not an error + } + } + this.connectionStates.ready(node); log.debug("Node {} has finalized features epoch: {}, finalized features: {}, supported features: {}, API versions: {}.", node, apiVersionsResponse.data().finalizedFeaturesEpoch(), apiVersionsResponse.data().finalizedFeatures(), @@ -1171,6 +1250,10 @@ private boolean isTelemetryApi(ApiKeys apiKey) { return apiKey == ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS || apiKey == ApiKeys.PUSH_TELEMETRY; } + private boolean isConfigPushApi(ApiKeys apiKey) { + return apiKey == ApiKeys.GET_CONFIG_SUBSCRIPTION || apiKey == ApiKeys.PUSH_CONFIG; + } + class DefaultMetadataUpdater implements MetadataUpdater { /* the current cluster metadata */ @@ -1379,6 +1462,176 @@ private InProgressData(int requestVersion, boolean isPartialUpdate) { } + /** + * Handle config push handshake state machine. + * Called from poll() to progress through subscription → push → done. 
+ */ + private void handleConfigPushHandshake(long now) { + // Only proceed if enabled and not in a terminal state + if (!enableConfigPush || + configPushState == ConfigPushState.COMPLETED || + configPushState == ConfigPushState.FAILED) { + return; + } + + // Find a ready node to send to + Node node = leastLoadedNode(now).node(); + if (node == null) { + log.trace("No node available for config push handshake"); + return; + } + + String nodeId = node.idString(); + + // Can't send if channel not ready or in-flight limit reached + if (!selector.isChannelReady(nodeId) || !inFlightRequests.canSendMore(nodeId)) { + return; + } + + // State machine progression + switch (configPushState) { + case NOT_STARTED: + initiateGetConfigSubscription(nodeId, now); + configPushState = ConfigPushState.SUBSCRIPTION_IN_PROGRESS; + break; + + case SUBSCRIPTION_IN_PROGRESS: + // Waiting for response, nothing to do + break; + + case PUSH_IN_PROGRESS: + // Waiting for response, nothing to do + break; + + default: + // Terminal states, shouldn't reach here + break; + } + } + + /** + * Send GetConfigSubscription request to broker. + */ + private void initiateGetConfigSubscription(String nodeId, long now) { + log.debug("Sending GetConfigSubscription to node {}", nodeId); + + GetConfigSubscriptionRequestData requestData = new GetConfigSubscriptionRequestData() + .setClientInstanceId(clientInstanceId); // ZERO_UUID on first call + + GetConfigSubscriptionRequest.Builder builder = + new GetConfigSubscriptionRequest.Builder(requestData); + + ClientRequest request = newClientRequest(nodeId, builder, now, true); + doSend(request, true, now); + } + + /** + * Process GetConfigSubscription response. + * Stores subscription details and transitions to push phase if successful. 
+ */ + private void handleGetConfigSubscriptionResponse( + String nodeId, + GetConfigSubscriptionResponse response, + long now) { + + Errors error = response.error(); + if (error != Errors.NONE) { + log.warn("GetConfigSubscription request to {} failed with error: {}", nodeId, error); + configPushState = ConfigPushState.FAILED; + return; + } + + GetConfigSubscriptionResponseData data = response.data(); + + // Store client instance ID if this was the first request + Uuid receivedInstanceId = data.clientInstanceId(); + if (!receivedInstanceId.equals(Uuid.ZERO_UUID)) { + clientInstanceId = receivedInstanceId; + log.debug("Received client instance ID: {}", clientInstanceId); + } + + // Store subscription details + configSubscriptionId = data.subscriptionId(); + configMaxBytes = data.configMaxBytes(); + + // Extract requested keys + requestedConfigKeys = data.requestedKeys() + .stream() + .map(key -> key.name()) + .collect(Collectors.toList()); + + log.debug("Config subscription received: subscriptionId={}, maxBytes={}, keys={}", + configSubscriptionId, configMaxBytes, requestedConfigKeys.size()); + + // Immediately initiate push + initiatePushConfig(nodeId, now); + configPushState = ConfigPushState.PUSH_IN_PROGRESS; + } + + /** + * Collect client configuration and send PushConfig request. 
+ */ + private void initiatePushConfig(String nodeId, long now) { + log.debug("Collecting and pushing config to node {}", nodeId); + + // Collect configs using ConfigCollector + List configs; + try { + configs = ConfigCollector.collectConfigs( + clientConfig, + requestedConfigKeys, + configMaxBytes + ); + } catch (Exception e) { + log.error("Failed to collect configs for push", e); + configPushState = ConfigPushState.FAILED; + return; + } + + // Build request + PushConfigRequestData requestData = new PushConfigRequestData() + .setClientInstanceId(clientInstanceId) + .setSubscriptionId(configSubscriptionId) + .setConfigs(configs); + + PushConfigRequest.Builder builder = new PushConfigRequest.Builder(requestData); + + ClientRequest request = newClientRequest(nodeId, builder, now, true); + doSend(request, true, now); + } + + /** + * Process PushConfig response. + * Marks handshake as complete or handles retry scenarios. + */ + private void handlePushConfigResponse( + String nodeId, + PushConfigResponse response, + long now) { + + Errors error = response.error(); + + if (error == Errors.NONE) { + log.info("Configuration push to {} completed successfully", nodeId); + configPushState = ConfigPushState.COMPLETED; + + } else if (error == Errors.UNKNOWN_CONFIG_SUBSCRIPTION_ID) { + log.warn("Subscription changed on {}, retrying GetConfigSubscription", nodeId); + // Reset to retry once + configPushState = ConfigPushState.NOT_STARTED; + configSubscriptionId = -1; + requestedConfigKeys.clear(); + + } else if (error == Errors.CONFIG_TOO_LARGE) { + log.error("Config payload too large for {}, cannot retry", nodeId); + configPushState = ConfigPushState.FAILED; + + } else { + log.warn("PushConfig to {} failed with error: {}", nodeId, error); + configPushState = ConfigPushState.FAILED; + } + } + class TelemetrySender { private final ClientTelemetrySender clientTelemetrySender; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index 7ad52c2122627..cfd31c61d2227 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -390,6 +390,15 @@ public class ConsumerConfig extends AbstractConfig { " to align with batch boundaries for optimization."; public static final String DEFAULT_SHARE_ACQUIRE_MODE = ShareAcquireMode.BATCH_OPTIMIZED.name(); + /** + * enable.config.push + */ + public static final String ENABLE_CONFIG_PUSH_CONFIG = "enable.config.push"; + private static final String ENABLE_CONFIG_PUSH_DOC = + "When set to 'true', the consumer will push its configuration to the broker " + + "for observability and troubleshooting. This is a best-effort operation and " + + "failures will not prevent the consumer from functioning normally."; + private static final AtomicInteger CONSUMER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); /** @@ -701,6 +710,11 @@ public class ConsumerConfig extends AbstractConfig { new ShareAcquireMode.Validator(), Importance.MEDIUM, ConsumerConfig.SHARE_ACQUIRE_MODE_DOC) + .define(ENABLE_CONFIG_PUSH_CONFIG, + Type.BOOLEAN, + true, + Importance.LOW, + ENABLE_CONFIG_PUSH_DOC) .define(CONFIG_PROVIDERS_CONFIG, ConfigDef.Type.LIST, List.of(), diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index 1949b0ff8d815..21a7119e35882 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -22,6 +22,7 @@ import org.apache.kafka.common.errors.BrokerNotAvailableException; import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.ConcurrentTransactionsException; +import org.apache.kafka.common.errors.ConfigTooLargeException; import 
org.apache.kafka.common.errors.ControllerMovedException; import org.apache.kafka.common.errors.CoordinatorLoadInProgressException; import org.apache.kafka.common.errors.CoordinatorNotAvailableException; @@ -134,6 +135,7 @@ import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; import org.apache.kafka.common.errors.TransactionalIdNotFoundException; import org.apache.kafka.common.errors.UnacceptableCredentialException; +import org.apache.kafka.common.errors.UnknownConfigSubscriptionIdException; import org.apache.kafka.common.errors.UnknownControllerIdException; import org.apache.kafka.common.errors.UnknownLeaderEpochException; import org.apache.kafka.common.errors.UnknownMemberIdException; diff --git a/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json b/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json index 84050fd6a843e..64f92eba1ff2c 100644 --- a/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json +++ b/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json @@ -1,3 +1,18 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ { "apiKey": 93, "type": "request", diff --git a/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json b/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json index 0c7a74f34db4c..29e55feb65fa2 100644 --- a/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json +++ b/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json @@ -1,3 +1,18 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + { "apiKey": 93, "type": "response", diff --git a/clients/src/main/resources/common/message/PushConfigRequest.json b/clients/src/main/resources/common/message/PushConfigRequest.json index 05e8601105eea..893a453f5cd1a 100644 --- a/clients/src/main/resources/common/message/PushConfigRequest.json +++ b/clients/src/main/resources/common/message/PushConfigRequest.json @@ -1,3 +1,18 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + { "apiKey": 94, "type": "request", diff --git a/clients/src/main/resources/common/message/PushConfigResponse.json b/clients/src/main/resources/common/message/PushConfigResponse.json index c054a492d44b5..0b9c7f1d834b6 100644 --- a/clients/src/main/resources/common/message/PushConfigResponse.json +++ b/clients/src/main/resources/common/message/PushConfigResponse.json @@ -1,3 +1,18 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ { "apiKey": 94, "type": "response", diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java index e8dcf5843dcb8..7e8fbe20fefbb 100644 --- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java @@ -1152,7 +1152,7 @@ public void testReconnectAfterAddressChange() { reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, - Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE, false, null); // Connect to one the initial addresses, then change the addresses and disconnect client.ready(node, time.milliseconds()); @@ -1213,7 +1213,7 @@ public void testFailedConnectionToFirstAddress() { reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, - Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE, false, null); // First connection attempt should fail client.ready(node, time.milliseconds()); @@ -1266,7 +1266,7 @@ public void testFailedConnectionToFirstAddressAfterReconnect() { reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, - Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE, false, null); // Connect to one the initial addresses, then change 
the addresses and disconnect client.ready(node, time.milliseconds()); @@ -1375,7 +1375,7 @@ public void testTelemetryRequest() { reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, true, new ApiVersions(), null, new LogContext(), new DefaultHostResolver(), mockClientTelemetrySender, - Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE, false, null); // Send the ApiVersionsRequest client.ready(node, time.milliseconds()); From cc6f51baaee7d4c0dcfe1e766e25c5bbd38fb74b Mon Sep 17 00:00:00 2001 From: Kirk True Date: Tue, 24 Mar 2026 15:16:26 -0700 Subject: [PATCH 04/10] WIP for client --- .../apache/kafka/clients/ConfigCollector.java | 205 ++++++++++++++++++ 1 file changed, 205 insertions(+) create mode 100644 clients/src/main/java/org/apache/kafka/clients/ConfigCollector.java diff --git a/clients/src/main/java/org/apache/kafka/clients/ConfigCollector.java b/clients/src/main/java/org/apache/kafka/clients/ConfigCollector.java new file mode 100644 index 0000000000000..88f30eee1f307 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/ConfigCollector.java @@ -0,0 +1,205 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients; + +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.message.PushConfigRequestData; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; + +/** + * Utility class for collecting and filtering client configuration for transmission to brokers. + * This class handles filtering out sensitive configuration (passwords, security settings, etc.) + * and converting configuration to the format required by the PushConfig RPC. + */ +public class ConfigCollector { + private static final Logger log = LoggerFactory.getLogger(ConfigCollector.class); + + /** + * Collect non-sensitive configuration values for transmission to broker. + * + * @param config Client configuration (ConsumerConfig, ProducerConfig, etc.) 
+ * @param requestedKeys Keys requested by broker ("*" for all non-sensitive) + * @param maxBytes Maximum payload size in bytes + * @return List of config entries ready for PushConfigRequest + */ + public static List collectConfigs( + AbstractConfig config, + List requestedKeys, + int maxBytes) { + + List result = new ArrayList<>(); + + // Expand wildcard "*" to all keys + Set keysToInclude = expandKeys(config, requestedKeys); + + // Filter and convert + int currentBytes = 0; + for (String key : keysToInclude) { + if (shouldExclude(key, config)) { + continue; // Skip sensitive configs + } + + Object value = config.values().get(key); + if (value == null) { + continue; + } + + ConfigDef.Type type = config.typeOf(key); + if (type == null) { + continue; // Unknown config + } + + PushConfigRequestData.ClientConfig entry = + convertToClientConfig(key, value, type); + + // Check size limit + int entrySize = estimateSize(entry); + if (currentBytes + entrySize > maxBytes) { + log.warn("Config payload would exceed {} bytes, truncating at {} entries", + maxBytes, result.size()); + break; + } + + result.add(entry); + currentBytes += entrySize; + } + + log.debug("Collected {} config entries ({} bytes)", result.size(), currentBytes); + return result; + } + + /** + * Expand wildcard or specific key list to actual keys to include. + */ + private static Set expandKeys(AbstractConfig config, List requestedKeys) { + Set keysToInclude = new HashSet<>(); + + for (String requestedKey : requestedKeys) { + if ("*".equals(requestedKey)) { + // Wildcard - include all keys from config + keysToInclude.addAll(config.values().keySet()); + } else { + // Specific key requested + keysToInclude.add(requestedKey); + } + } + + return keysToInclude; + } + + /** + * Determine if a config key should be excluded from transmission. + * Excludes passwords, security settings, class names, and other sensitive data. 
+ */ + private static boolean shouldExclude(String key, AbstractConfig config) { + ConfigDef.Type type = config.typeOf(key); + + // 1. Exclude PASSWORD type + if (type == ConfigDef.Type.PASSWORD) { + return true; + } + + // 2. Exclude CLASS type (per KIP requirement) + if (type == ConfigDef.Type.CLASS) { + return true; + } + + // 3. Exclude bootstrap.servers + if ("bootstrap.servers".equals(key)) { + return true; + } + + // 4. Exclude security/auth related keys + String lowerKey = key.toLowerCase(Locale.ROOT); + if (lowerKey.contains("sasl.") || + lowerKey.contains("ssl.") || + lowerKey.contains("security.")) { + return true; + } + + // 5. Exclude keys ending with sensitive suffixes + if (lowerKey.endsWith(".password") || + lowerKey.endsWith(".secret") || + lowerKey.endsWith(".key") || + lowerKey.endsWith(".token")) { + return true; + } + + return false; + } + + /** + * Convert a config entry to the protocol format. + */ + private static PushConfigRequestData.ClientConfig convertToClientConfig( + String key, + Object value, + ConfigDef.Type type) { + + PushConfigRequestData.ClientConfig config = new PushConfigRequestData.ClientConfig(); + config.setName(key); + config.setValue(String.valueOf(value)); + config.setType(mapConfigType(type)); + return config; + } + + /** + * Convert ConfigDef.Type to protocol byte value. + */ + private static byte mapConfigType(ConfigDef.Type type) { + switch (type) { + case BOOLEAN: + return 0; + case STRING: + return 1; + case INT: + return 2; + case SHORT: + return 3; + case LONG: + return 4; + case DOUBLE: + return 5; + case LIST: + return 6; + case CLASS: + return 7; + case PASSWORD: + return 8; // Should never reach here due to filtering + default: + return 1; // Default to STRING + } + } + + /** + * Estimate the size of a config entry in bytes. + * This is a rough estimate for checking against maxBytes limit. 
+ */ + private static int estimateSize(PushConfigRequestData.ClientConfig config) { + // Rough estimate: key length + value length + overhead for type and framing + return config.name().length() + config.value().length() + 10; + } +} From a67ccea82e98df317ea239aa0a00b02f6e3c0e58 Mon Sep 17 00:00:00 2001 From: Kirk True Date: Tue, 24 Mar 2026 15:52:06 -0700 Subject: [PATCH 05/10] WIP for client --- .../org/apache/kafka/clients/ClientUtils.java | 15 +- .../kafka/clients/CommonClientConfigs.java | 3 + .../apache/kafka/clients/NetworkClient.java | 301 ++++-------------- .../kafka/clients/admin/KafkaAdminClient.java | 3 +- .../clients/consumer/ConsumerConfig.java | 19 +- .../consumer/internals/ConsumerUtils.java | 3 +- .../internals/NetworkClientDelegate.java | 3 +- .../kafka/clients/producer/KafkaProducer.java | 3 +- .../kafka/clients/NetworkClientTest.java | 16 +- 9 files changed, 90 insertions(+), 276 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index a08e333f8fd9e..281dbbb3c1135 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -159,7 +159,8 @@ public static NetworkClient createNetworkClient(AbstractConfig config, int maxInFlightRequestsPerConnection, Metadata metadata, Sensor throttleTimeSensor, - ClientTelemetrySender clientTelemetrySender) { + ClientTelemetrySender clientTelemetrySender, + ClientConfigsSender clientConfigsSender) { return createNetworkClient(config, config.getString(CommonClientConfigs.CLIENT_ID_CONFIG), metrics, @@ -173,7 +174,8 @@ public static NetworkClient createNetworkClient(AbstractConfig config, null, new DefaultHostResolver(), throttleTimeSensor, - clientTelemetrySender); + clientTelemetrySender, + clientConfigsSender); } public static NetworkClient createNetworkClient(AbstractConfig config, @@ -200,6 +202,7 @@ public static 
NetworkClient createNetworkClient(AbstractConfig config, metadataUpdater, hostResolver, null, + null, null); } @@ -216,7 +219,8 @@ public static NetworkClient createNetworkClient(AbstractConfig config, MetadataUpdater metadataUpdater, HostResolver hostResolver, Sensor throttleTimeSensor, - ClientTelemetrySender clientTelemetrySender) { + ClientTelemetrySender clientTelemetrySender, + ClientConfigsSender clientConfigsSender) { ChannelBuilder channelBuilder = null; Selector selector = null; @@ -247,10 +251,9 @@ public static NetworkClient createNetworkClient(AbstractConfig config, logContext, hostResolver, clientTelemetrySender, + clientConfigsSender, config.getLong(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG), - MetadataRecoveryStrategy.forName(config.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)), - false, - null + MetadataRecoveryStrategy.forName(config.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)) ); } catch (Throwable t) { closeQuietly(selector, "Selector"); diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index 08b861673e3d7..468e11e0304e8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -111,6 +111,9 @@ public class CommonClientConfigs { public static final String ENABLE_METRICS_PUSH_CONFIG = "enable.metrics.push"; public static final String ENABLE_METRICS_PUSH_DOC = "Whether to enable pushing of client metrics to the cluster, if the cluster has a client metrics subscription which matches this client."; + public static final String ENABLE_CONFIGS_PUSH_CONFIG = "enable.configs.push"; + public static final String ENABLE_CONFIGS_PUSH_DOC = "When set to 'true', the consumer will push its configuration to the broker for observability and troubleshooting."; + public static final 
String METRICS_SAMPLE_WINDOW_MS_CONFIG = "metrics.sample.window.ms"; public static final String METRICS_SAMPLE_WINDOW_MS_DOC = "The window of time a metrics sample is computed over."; diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index 976579cb120c2..f7401118b2bb6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -20,15 +20,10 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; import org.apache.kafka.common.errors.UnsupportedVersionException; -import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion; -import org.apache.kafka.common.message.GetConfigSubscriptionRequestData; -import org.apache.kafka.common.message.GetConfigSubscriptionResponseData; -import org.apache.kafka.common.message.PushConfigRequestData; +import org.apache.kafka.common.message.ApiVersionsResponseData; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.network.ChannelState; import org.apache.kafka.common.network.NetworkReceive; @@ -43,12 +38,10 @@ import org.apache.kafka.common.requests.ApiVersionsRequest; import org.apache.kafka.common.requests.ApiVersionsResponse; import org.apache.kafka.common.requests.CorrelationIdMismatchException; -import org.apache.kafka.common.requests.GetConfigSubscriptionRequest; import org.apache.kafka.common.requests.GetConfigSubscriptionResponse; import org.apache.kafka.common.requests.GetTelemetrySubscriptionsResponse; import org.apache.kafka.common.requests.MetadataRequest; import 
org.apache.kafka.common.requests.MetadataResponse; -import org.apache.kafka.common.requests.PushConfigRequest; import org.apache.kafka.common.requests.PushConfigResponse; import org.apache.kafka.common.requests.PushTelemetryResponse; import org.apache.kafka.common.requests.RequestHeader; @@ -91,17 +84,6 @@ private enum State { CLOSED } - /** - * Config push handshake state tracking - */ - private enum ConfigPushState { - NOT_STARTED, // Haven't started handshake - SUBSCRIPTION_IN_PROGRESS, // Waiting for GetConfigSubscription response - PUSH_IN_PROGRESS, // Waiting for PushConfig response - COMPLETED, // Successfully pushed config - FAILED // Failed (but client continues) - } - private final Logger log; /* the selector used to perform network i/o */ @@ -158,15 +140,7 @@ private enum ConfigPushState { private final AtomicReference state; private final TelemetrySender telemetrySender; - - // Config push state - private final boolean enableConfigPush; - private final AbstractConfig clientConfig; - private volatile Uuid clientInstanceId = Uuid.ZERO_UUID; - private volatile ConfigPushState configPushState = ConfigPushState.NOT_STARTED; - private volatile int configSubscriptionId = -1; - private volatile int configMaxBytes = 0; - private volatile List requestedConfigKeys = new ArrayList<>(); + private final ConfigsSender configsSender; public NetworkClient(Selectable selector, Metadata metadata, @@ -203,10 +177,9 @@ public NetworkClient(Selectable selector, logContext, new DefaultHostResolver(), null, + null, Long.MAX_VALUE, - metadataRecoveryStrategy, - false, - null); + metadataRecoveryStrategy); } public NetworkClient(Selectable selector, @@ -245,10 +218,9 @@ public NetworkClient(Selectable selector, logContext, new DefaultHostResolver(), null, + null, rebootstrapTriggerMs, - metadataRecoveryStrategy, - false, - null); + metadataRecoveryStrategy); } public NetworkClient(Selectable selector, @@ -287,10 +259,9 @@ public NetworkClient(Selectable selector, logContext, new 
DefaultHostResolver(), null, + null, Long.MAX_VALUE, - metadataRecoveryStrategy, - false, - null); + metadataRecoveryStrategy); } public NetworkClient(Selectable selector, @@ -328,10 +299,9 @@ public NetworkClient(Selectable selector, logContext, new DefaultHostResolver(), null, + null, Long.MAX_VALUE, - metadataRecoveryStrategy, - false, - null); + metadataRecoveryStrategy); } public NetworkClient(MetadataUpdater metadataUpdater, @@ -353,10 +323,9 @@ public NetworkClient(MetadataUpdater metadataUpdater, LogContext logContext, HostResolver hostResolver, ClientTelemetrySender clientTelemetrySender, + ClientConfigsSender clientConfigsSender, long rebootstrapTriggerMs, - MetadataRecoveryStrategy metadataRecoveryStrategy, - boolean enableConfigPush, - AbstractConfig clientConfig) { + MetadataRecoveryStrategy metadataRecoveryStrategy) { /* It would be better if we could pass `DefaultMetadataUpdater` from the public constructor, but it's not * possible because `DefaultMetadataUpdater` is an inner class and it can only be instantiated after the * super constructor is invoked. @@ -387,10 +356,9 @@ public NetworkClient(MetadataUpdater metadataUpdater, this.log = logContext.logger(NetworkClient.class); this.state = new AtomicReference<>(State.ACTIVE); this.telemetrySender = (clientTelemetrySender != null) ? new TelemetrySender(clientTelemetrySender) : null; + this.configsSender = (clientConfigsSender != null) ? 
new ConfigsSender(clientConfigsSender) : null; this.rebootstrapTriggerMs = rebootstrapTriggerMs; this.metadataRecoveryStrategy = metadataRecoveryStrategy; - this.enableConfigPush = enableConfigPush; - this.clientConfig = clientConfig; } /** @@ -475,13 +443,8 @@ private void cancelInFlightRequests(String nodeId, metadataUpdater.handleFailedRequest(now, Optional.empty()); } else if (isTelemetryApi(request.header.apiKey()) && telemetrySender != null) { telemetrySender.handleFailedRequest(request.header.apiKey(), null); - } else if (isConfigPushApi(request.header.apiKey()) && enableConfigPush) { - // Config push failed due to disconnect - if (configPushState == ConfigPushState.SUBSCRIPTION_IN_PROGRESS || - configPushState == ConfigPushState.PUSH_IN_PROGRESS) { - log.debug("Config push request failed due to disconnect"); - configPushState = ConfigPushState.FAILED; - } + } else if (isConfigPushApi(request.header.apiKey()) && configsSender != null) { + configsSender.handleFailedRequest(request.header.apiKey(), null); } } } @@ -647,14 +610,8 @@ else if (clientRequest.apiKey() == ApiKeys.METADATA) metadataUpdater.handleFailedRequest(now, Optional.of(unsupportedVersionException)); else if (isTelemetryApi(clientRequest.apiKey()) && telemetrySender != null) telemetrySender.handleFailedRequest(clientRequest.apiKey(), unsupportedVersionException); - else if (isConfigPushApi(clientRequest.apiKey()) && enableConfigPush) { - // Config push request failed due to unsupported version - if (configPushState == ConfigPushState.SUBSCRIPTION_IN_PROGRESS || - configPushState == ConfigPushState.PUSH_IN_PROGRESS) { - log.debug("Config push request failed: unsupported version"); - configPushState = ConfigPushState.FAILED; - } - } + else if (isConfigPushApi(clientRequest.apiKey()) && configsSender != null) + configsSender.handleFailedRequest(clientRequest.apiKey(), unsupportedVersionException); } } @@ -715,7 +672,6 @@ public List poll(long timeout, long now) { handleDisconnections(responses, 
updatedNow); handleConnections(); handleInitiateApiVersionRequests(updatedNow); - handleConfigPushHandshake(updatedNow); handleTimedOutConnections(responses, updatedNow); handleTimedOutRequests(responses, updatedNow); handleRebootstrap(responses, updatedNow); @@ -1077,9 +1033,9 @@ else if (req.isInternalRequest && response instanceof GetTelemetrySubscriptionsR else if (req.isInternalRequest && response instanceof PushTelemetryResponse) telemetrySender.handleResponse((PushTelemetryResponse) response); else if (req.isInternalRequest && response instanceof GetConfigSubscriptionResponse) - handleGetConfigSubscriptionResponse(req.destination, (GetConfigSubscriptionResponse) response, now); + configsSender.handleResponse((GetConfigSubscriptionResponse) response); else if (req.isInternalRequest && response instanceof PushConfigResponse) - handlePushConfigResponse(req.destination, (PushConfigResponse) response, now); + configsSender.handleResponse((PushConfigResponse) response); else responses.add(req.completed(response, now)); } @@ -1100,7 +1056,7 @@ private void handleApiVersionsResponse(List responses, // If not provided, the client falls back to version 0. 
short maxApiVersion = 0; if (apiVersionsResponse.data().apiKeys().size() > 0) { - ApiVersion apiVersion = apiVersionsResponse.data().apiKeys().find(ApiKeys.API_VERSIONS.id); + ApiVersionsResponseData.ApiVersion apiVersion = apiVersionsResponse.data().apiKeys().find(ApiKeys.API_VERSIONS.id); if (apiVersion != null) { maxApiVersion = apiVersion.maxVersion(); } @@ -1116,19 +1072,6 @@ private void handleApiVersionsResponse(List responses, apiVersionsResponse.data().finalizedFeaturesEpoch()); apiVersions.update(node, nodeVersionInfo); - // Check if we should initiate config push handshake - if (enableConfigPush && configPushState == ConfigPushState.NOT_STARTED) { - ApiVersion configSubVersion = nodeVersionInfo.apiVersion(ApiKeys.GET_CONFIG_SUBSCRIPTION); - if (configSubVersion != null) { - log.debug("Node {} supports config push (version {}), will initiate handshake", - node, configSubVersion); - // Don't send immediately - will be triggered in poll() - } else { - log.debug("Node {} does not support config push, skipping", node); - configPushState = ConfigPushState.FAILED; // Skip, not an error - } - } - this.connectionStates.ready(node); log.debug("Node {} has finalized features epoch: {}, finalized features: {}, supported features: {}, API versions: {}.", node, apiVersionsResponse.data().finalizedFeaturesEpoch(), apiVersionsResponse.data().finalizedFeatures(), @@ -1462,176 +1405,6 @@ private InProgressData(int requestVersion, boolean isPartialUpdate) { } - /** - * Handle config push handshake state machine. - * Called from poll() to progress through subscription → push → done. 
- */ - private void handleConfigPushHandshake(long now) { - // Only proceed if enabled and not in a terminal state - if (!enableConfigPush || - configPushState == ConfigPushState.COMPLETED || - configPushState == ConfigPushState.FAILED) { - return; - } - - // Find a ready node to send to - Node node = leastLoadedNode(now).node(); - if (node == null) { - log.trace("No node available for config push handshake"); - return; - } - - String nodeId = node.idString(); - - // Can't send if channel not ready or in-flight limit reached - if (!selector.isChannelReady(nodeId) || !inFlightRequests.canSendMore(nodeId)) { - return; - } - - // State machine progression - switch (configPushState) { - case NOT_STARTED: - initiateGetConfigSubscription(nodeId, now); - configPushState = ConfigPushState.SUBSCRIPTION_IN_PROGRESS; - break; - - case SUBSCRIPTION_IN_PROGRESS: - // Waiting for response, nothing to do - break; - - case PUSH_IN_PROGRESS: - // Waiting for response, nothing to do - break; - - default: - // Terminal states, shouldn't reach here - break; - } - } - - /** - * Send GetConfigSubscription request to broker. - */ - private void initiateGetConfigSubscription(String nodeId, long now) { - log.debug("Sending GetConfigSubscription to node {}", nodeId); - - GetConfigSubscriptionRequestData requestData = new GetConfigSubscriptionRequestData() - .setClientInstanceId(clientInstanceId); // ZERO_UUID on first call - - GetConfigSubscriptionRequest.Builder builder = - new GetConfigSubscriptionRequest.Builder(requestData); - - ClientRequest request = newClientRequest(nodeId, builder, now, true); - doSend(request, true, now); - } - - /** - * Process GetConfigSubscription response. - * Stores subscription details and transitions to push phase if successful. 
- */ - private void handleGetConfigSubscriptionResponse( - String nodeId, - GetConfigSubscriptionResponse response, - long now) { - - Errors error = response.error(); - if (error != Errors.NONE) { - log.warn("GetConfigSubscription request to {} failed with error: {}", nodeId, error); - configPushState = ConfigPushState.FAILED; - return; - } - - GetConfigSubscriptionResponseData data = response.data(); - - // Store client instance ID if this was the first request - Uuid receivedInstanceId = data.clientInstanceId(); - if (!receivedInstanceId.equals(Uuid.ZERO_UUID)) { - clientInstanceId = receivedInstanceId; - log.debug("Received client instance ID: {}", clientInstanceId); - } - - // Store subscription details - configSubscriptionId = data.subscriptionId(); - configMaxBytes = data.configMaxBytes(); - - // Extract requested keys - requestedConfigKeys = data.requestedKeys() - .stream() - .map(key -> key.name()) - .collect(Collectors.toList()); - - log.debug("Config subscription received: subscriptionId={}, maxBytes={}, keys={}", - configSubscriptionId, configMaxBytes, requestedConfigKeys.size()); - - // Immediately initiate push - initiatePushConfig(nodeId, now); - configPushState = ConfigPushState.PUSH_IN_PROGRESS; - } - - /** - * Collect client configuration and send PushConfig request. 
- */ - private void initiatePushConfig(String nodeId, long now) { - log.debug("Collecting and pushing config to node {}", nodeId); - - // Collect configs using ConfigCollector - List configs; - try { - configs = ConfigCollector.collectConfigs( - clientConfig, - requestedConfigKeys, - configMaxBytes - ); - } catch (Exception e) { - log.error("Failed to collect configs for push", e); - configPushState = ConfigPushState.FAILED; - return; - } - - // Build request - PushConfigRequestData requestData = new PushConfigRequestData() - .setClientInstanceId(clientInstanceId) - .setSubscriptionId(configSubscriptionId) - .setConfigs(configs); - - PushConfigRequest.Builder builder = new PushConfigRequest.Builder(requestData); - - ClientRequest request = newClientRequest(nodeId, builder, now, true); - doSend(request, true, now); - } - - /** - * Process PushConfig response. - * Marks handshake as complete or handles retry scenarios. - */ - private void handlePushConfigResponse( - String nodeId, - PushConfigResponse response, - long now) { - - Errors error = response.error(); - - if (error == Errors.NONE) { - log.info("Configuration push to {} completed successfully", nodeId); - configPushState = ConfigPushState.COMPLETED; - - } else if (error == Errors.UNKNOWN_CONFIG_SUBSCRIPTION_ID) { - log.warn("Subscription changed on {}, retrying GetConfigSubscription", nodeId); - // Reset to retry once - configPushState = ConfigPushState.NOT_STARTED; - configSubscriptionId = -1; - requestedConfigKeys.clear(); - - } else if (error == Errors.CONFIG_TOO_LARGE) { - log.error("Config payload too large for {}, cannot retry", nodeId); - configPushState = ConfigPushState.FAILED; - - } else { - log.warn("PushConfig to {} failed with error: {}", nodeId, error); - configPushState = ConfigPushState.FAILED; - } - } - class TelemetrySender { private final ClientTelemetrySender clientTelemetrySender; @@ -1721,6 +1494,40 @@ public void close() { } } + class ConfigsSender { + + private final 
ClientConfigsSender clientConfigsSender; + + public ConfigsSender(ClientConfigsSender clientConfigsSender) { + this.clientConfigsSender = clientConfigsSender; + } + + public void handleResponse(GetConfigSubscriptionResponse response) { + clientConfigsSender.handleResponse(response); + } + + public void handleResponse(PushConfigResponse response) { + clientConfigsSender.handleResponse(response); + } + + public void handleFailedRequest(ApiKeys apiKey, KafkaException maybeFatalException) { + if (apiKey == ApiKeys.GET_CONFIG_SUBSCRIPTION) + clientConfigsSender.handleFailedGetConfigsSubscriptionRequest(maybeFatalException); + else if (apiKey == ApiKeys.PUSH_TELEMETRY) + clientConfigsSender.handleFailedPushConfigsRequest(maybeFatalException); + else + throw new IllegalStateException("Invalid api key for failed configs request"); + } + + public void close() { + try { + clientConfigsSender.close(); + } catch (Exception exception) { + log.error("Failed to close client configs sender", exception); + } + } + } + @Override public ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder requestBuilder, diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java index 85f1e87459f01..5f8b57035d1c5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java @@ -563,7 +563,8 @@ static KafkaAdminClient createInternal( metadataManager.updater(), (hostResolver == null) ? 
new DefaultHostResolver() : hostResolver, null, - clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null)); + clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), + null); return new KafkaAdminClient(config, clientId, time, metadataManager, metrics, networkClient, timeoutProcessorFactory, logContext, clientTelemetryReporter); } catch (Throwable exc) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index cfd31c61d2227..9ef0ec109d389 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -262,6 +262,12 @@ public class ConsumerConfig extends AbstractConfig { public static final String ENABLE_METRICS_PUSH_CONFIG = CommonClientConfigs.ENABLE_METRICS_PUSH_CONFIG; public static final String ENABLE_METRICS_PUSH_DOC = CommonClientConfigs.ENABLE_METRICS_PUSH_DOC; + /** + * enable.configs.push + */ + public static final String ENABLE_CONFIGS_PUSH_CONFIG = CommonClientConfigs.ENABLE_CONFIGS_PUSH_CONFIG; + public static final String ENABLE_CONFIGS_PUSH_DOC = CommonClientConfigs.ENABLE_CONFIGS_PUSH_DOC; + /** * retry.backoff.max.ms */ @@ -390,15 +396,6 @@ public class ConsumerConfig extends AbstractConfig { " to align with batch boundaries for optimization."; public static final String DEFAULT_SHARE_ACQUIRE_MODE = ShareAcquireMode.BATCH_OPTIMIZED.name(); - /** - * enable.config.push - */ - public static final String ENABLE_CONFIG_PUSH_CONFIG = "enable.config.push"; - private static final String ENABLE_CONFIG_PUSH_DOC = - "When set to 'true', the consumer will push its configuration to the broker " + - "for observability and troubleshooting. 
This is a best-effort operation and " + - "failures will not prevent the consumer from functioning normally."; - private static final AtomicInteger CONSUMER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); /** @@ -710,11 +707,11 @@ public class ConsumerConfig extends AbstractConfig { new ShareAcquireMode.Validator(), Importance.MEDIUM, ConsumerConfig.SHARE_ACQUIRE_MODE_DOC) - .define(ENABLE_CONFIG_PUSH_CONFIG, + .define(ENABLE_CONFIGS_PUSH_CONFIG, Type.BOOLEAN, true, Importance.LOW, - ENABLE_CONFIG_PUSH_DOC) + ENABLE_CONFIGS_PUSH_DOC) .define(CONFIG_PROVIDERS_CONFIG, ConfigDef.Type.LIST, List.of(), diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java index 2f715e206cc07..c862f98e2d759 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java @@ -97,7 +97,8 @@ public static ConsumerNetworkClient createConsumerNetworkClient(ConsumerConfig c CONSUMER_MAX_INFLIGHT_REQUESTS_PER_CONNECTION, metadata, throttleTimeSensor, - clientTelemetrySender); + clientTelemetrySender, + null); // Will avoid blocking an extended period of time to prevent heartbeat thread starvation int heartbeatIntervalMs = config.getInt(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java index 42c1c73acb565..2ac72d657f617 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java @@ -484,7 +484,8 @@ protected NetworkClientDelegate create() { CONSUMER_MAX_INFLIGHT_REQUESTS_PER_CONNECTION, metadata, 
throttleTimeSensor, - clientTelemetrySender); + clientTelemetrySender, + null); return new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue, asyncConsumerMetrics); } }; diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index 31a9f6b945c61..a1a30b34847e0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -536,7 +536,8 @@ Sender newSender(LogContext logContext, KafkaClient kafkaClient, ProducerMetadat maxInflightRequests, metadata, throttleTimeSensor, - clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null)); + clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), + null); short acks = Short.parseShort(producerConfig.getString(ProducerConfig.ACKS_CONFIG)); return new Sender(logContext, diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java index 7e8fbe20fefbb..8927d83f48ab2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java @@ -1151,8 +1151,8 @@ public void testReconnectAfterAddressChange() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, - Long.MAX_VALUE, MetadataRecoveryStrategy.NONE, false, null); + time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, 
mockClientTelemetrySender, null, + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Connect to one the initial addresses, then change the addresses and disconnect client.ready(node, time.milliseconds()); @@ -1212,8 +1212,8 @@ public void testFailedConnectionToFirstAddress() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, - Long.MAX_VALUE, MetadataRecoveryStrategy.NONE, false, null); + time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // First connection attempt should fail client.ready(node, time.milliseconds()); @@ -1265,8 +1265,8 @@ public void testFailedConnectionToFirstAddressAfterReconnect() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, - Long.MAX_VALUE, MetadataRecoveryStrategy.NONE, false, null); + time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Connect to one the initial addresses, then change the addresses and disconnect client.ready(node, time.milliseconds()); @@ -1374,8 +1374,8 @@ public void testTelemetryRequest() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, 
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, true, new ApiVersions(), null, new LogContext(), new DefaultHostResolver(), mockClientTelemetrySender, - Long.MAX_VALUE, MetadataRecoveryStrategy.NONE, false, null); + time, true, new ApiVersions(), null, new LogContext(), new DefaultHostResolver(), mockClientTelemetrySender, null, + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Send the ApiVersionsRequest client.ready(node, time.milliseconds()); From 6e4473bcd3d9c9a2cfd2dd7763d3d3effeca2c45 Mon Sep 17 00:00:00 2001 From: Kirk True Date: Tue, 24 Mar 2026 15:55:07 -0700 Subject: [PATCH 06/10] WIP for client --- .../kafka/clients/ClientConfigsSender.java | 108 ++++++++ .../clients/DefaultClientConfigsSender.java | 250 ++++++++++++++++++ 2 files changed, 358 insertions(+) create mode 100644 clients/src/main/java/org/apache/kafka/clients/ClientConfigsSender.java create mode 100644 clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientConfigsSender.java b/clients/src/main/java/org/apache/kafka/clients/ClientConfigsSender.java new file mode 100644 index 0000000000000..049d7bb2c495d --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/ClientConfigsSender.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.requests.AbstractRequest; +import org.apache.kafka.common.requests.GetConfigSubscriptionResponse; +import org.apache.kafka.common.requests.PushConfigResponse; + +import java.util.Optional; + +/** + * Interface for managing the client configuration push handshake with brokers. + *

+ * This is a one-time, best-effort operation performed during client initialization + * to push non-sensitive configuration to the broker for observability purposes. + *

+ * The handshake consists of two steps:
+ * <ol>
+ *   <li>GetConfigSubscription - Broker tells client what configs it wants</li>
+ *   <li>PushConfig - Client sends the requested configs</li>
+ * </ol>
+ */ +public interface ClientConfigsSender extends AutoCloseable { + + /** + * Returns true if the config push handshake needs to proceed. + *

+ * Once the handshake is completed or fails, this should return false. + * + * @return true if handshake should continue, false if terminal state reached + */ + boolean shouldAttemptHandshake(); + + /** + * Creates the next request in the handshake flow based on current state. + *

+ * Returns GetConfigSubscriptionRequest if subscription is needed, or + * PushConfigRequest if ready to push configs. + * + * @return Optional containing the next request builder, or empty if no request needed + */ + Optional> createRequest(); + + /** + * Handle successful GetConfigSubscription response. + *

+ * This extracts the subscription details (client instance ID, requested keys, max bytes) + * and prepares for the PushConfig step. + * + * @param response the subscription response from broker + */ + void handleResponse(GetConfigSubscriptionResponse response); + + /** + * Handle successful PushConfig response. + *

+ * This completes the handshake or handles errors like UNKNOWN_CONFIG_SUBSCRIPTION_ID. + * + * @param response the push config response from broker + */ + void handleResponse(PushConfigResponse response); + + /** + * Handle get configs subscription request failure. + * + * @param kafkaException the fatal exception. + */ + void handleFailedGetConfigsSubscriptionRequest(KafkaException kafkaException); + + /** + * Handle push configs request failure. + * + * @param kafkaException the fatal exception. + */ + void handleFailedPushConfigsRequest(KafkaException kafkaException); + + /** + * Handle disconnection during the handshake. + *

+ * If a connection is lost during the handshake, mark it as failed. + */ + void handleDisconnect(); + + /** + * Returns the client instance ID assigned by the broker. + *

+ * This is initially ZERO_UUID and gets assigned during the GetConfigSubscription response. + * + * @return the client instance ID, or ZERO_UUID if not yet assigned + */ + Uuid clientInstanceId(); +} diff --git a/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java b/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java new file mode 100644 index 0000000000000..e128af2691289 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java @@ -0,0 +1,250 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.message.GetConfigSubscriptionRequestData; +import org.apache.kafka.common.message.GetConfigSubscriptionResponseData; +import org.apache.kafka.common.message.PushConfigRequestData; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.AbstractRequest; +import org.apache.kafka.common.requests.GetConfigSubscriptionRequest; +import org.apache.kafka.common.requests.GetConfigSubscriptionResponse; +import org.apache.kafka.common.requests.PushConfigRequest; +import org.apache.kafka.common.requests.PushConfigResponse; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * Default implementation of ClientConfigsSender that manages the config push handshake. + *

+ * This implementation follows a simple state machine:
+ * <pre>
+ *   NOT_STARTED → SUBSCRIPTION_IN_PROGRESS → PUSH_IN_PROGRESS → COMPLETED/FAILED
+ * </pre>
+ */ +public class DefaultClientConfigsSender implements ClientConfigsSender { + + private static final Logger log = LoggerFactory.getLogger(DefaultClientConfigsSender.class); + + @Override + public void close() throws Exception { + + } + + private enum State { + NOT_STARTED, // Initial state, need to send GetConfigSubscription + SUBSCRIPTION_IN_PROGRESS, // Waiting for GetConfigSubscription response + PUSH_IN_PROGRESS, // Waiting for PushConfig response + COMPLETED, // Successfully pushed config + FAILED // Failed (but client continues) + } + + private final AbstractConfig clientConfig; + private volatile Uuid clientInstanceId = Uuid.ZERO_UUID; + private volatile State state = State.NOT_STARTED; + private volatile int configSubscriptionId = -1; + private volatile int configMaxBytes = 0; + private volatile List requestedConfigKeys = new ArrayList<>(); + + public DefaultClientConfigsSender(AbstractConfig clientConfig) { + this.clientConfig = clientConfig; + } + + @Override + public boolean shouldAttemptHandshake() { + return state != State.COMPLETED && state != State.FAILED; + } + + @Override + public synchronized Optional> createRequest() { + switch (state) { + case NOT_STARTED: + log.debug("Creating GetConfigSubscription request"); + state = State.SUBSCRIPTION_IN_PROGRESS; + return Optional.of(createGetConfigSubscriptionRequest()); + + case SUBSCRIPTION_IN_PROGRESS: + // Waiting for subscription response, no new request to send + return Optional.empty(); + + case PUSH_IN_PROGRESS: + // Check if we have subscription details and need to send push request + if (needsPushRequest()) { + PushConfigRequest.Builder builder = createPushConfigRequest(); + if (builder != null) { + // Mark that we've created the push request + requestedConfigKeys.clear(); // Clear to avoid creating duplicate requests + return Optional.of(builder); + } + } + return Optional.empty(); + + default: + // Terminal states (COMPLETED, FAILED) + return Optional.empty(); + } + } + + @Override + public 
synchronized void handleResponse(GetConfigSubscriptionResponse response) { + if (state != State.SUBSCRIPTION_IN_PROGRESS) { + log.warn("Received GetConfigSubscription response in unexpected state: {}", state); + return; + } + + Errors error = response.error(); + if (error != Errors.NONE) { + log.warn("GetConfigSubscription request failed with error: {}", error); + state = State.FAILED; + return; + } + + GetConfigSubscriptionResponseData data = response.data(); + + // Store client instance ID if this was the first request + Uuid receivedInstanceId = data.clientInstanceId(); + if (!receivedInstanceId.equals(Uuid.ZERO_UUID)) { + clientInstanceId = receivedInstanceId; + log.debug("Received client instance ID: {}", clientInstanceId); + } + + // Store subscription details + configSubscriptionId = data.subscriptionId(); + configMaxBytes = data.configMaxBytes(); + + // Extract requested keys + requestedConfigKeys = data.requestedKeys() + .stream() + .map(key -> key.name()) + .collect(Collectors.toList()); + + log.debug("Config subscription received: subscriptionId={}, maxBytes={}, keys={}", + configSubscriptionId, configMaxBytes, requestedConfigKeys.size()); + + // Transition to next state - PushConfig will be created on next createRequest() call + state = State.PUSH_IN_PROGRESS; + } + + @Override + public synchronized void handleResponse(PushConfigResponse response) { + if (state != State.PUSH_IN_PROGRESS) { + log.warn("Received PushConfig response in unexpected state: {}", state); + return; + } + + Errors error = response.error(); + + if (error == Errors.NONE) { + log.info("Configuration push completed successfully"); + state = State.COMPLETED; + + } else if (error == Errors.UNKNOWN_CONFIG_SUBSCRIPTION_ID) { + log.warn("Subscription changed, retrying GetConfigSubscription"); + // Reset to retry once + state = State.NOT_STARTED; + configSubscriptionId = -1; + requestedConfigKeys.clear(); + + } else if (error == Errors.CONFIG_TOO_LARGE) { + log.error("Config payload too 
large, cannot retry"); + state = State.FAILED; + + } else { + log.warn("PushConfig failed with error: {}", error); + state = State.FAILED; + } + } + + @Override + public void handleFailedGetConfigsSubscriptionRequest(KafkaException kafkaException) { + + } + + @Override + public void handleFailedPushConfigsRequest(KafkaException kafkaException) { + + } + + @Override + public synchronized void handleDisconnect() { + if (state == State.SUBSCRIPTION_IN_PROGRESS || state == State.PUSH_IN_PROGRESS) { + log.debug("Disconnected during config push handshake"); + state = State.FAILED; + } + } + + @Override + public Uuid clientInstanceId() { + return clientInstanceId; + } + + private GetConfigSubscriptionRequest.Builder createGetConfigSubscriptionRequest() { + GetConfigSubscriptionRequestData requestData = new GetConfigSubscriptionRequestData() + .setClientInstanceId(clientInstanceId); // ZERO_UUID on first call + + return new GetConfigSubscriptionRequest.Builder(requestData); + } + + /** + * Creates a PushConfig request with collected configuration. + * This should only be called after receiving a successful GetConfigSubscription response. + */ + private PushConfigRequest.Builder createPushConfigRequest() { + log.debug("Collecting and preparing config push"); + +// // Collect configs using ConfigCollector + List configs; + try { + configs = ConfigCollector.collectConfigs( + clientConfig, + requestedConfigKeys, + configMaxBytes + ); + } catch (Exception e) { + log.error("Failed to collect configs for push", e); + state = State.FAILED; + return null; + } + + // Build request + PushConfigRequestData requestData = new PushConfigRequestData() + .setClientInstanceId(clientInstanceId) + .setSubscriptionId(configSubscriptionId) + .setConfigs(configs); + + return new PushConfigRequest.Builder(requestData); + } + + /** + * Checks if we need to send a PushConfig request. + * This is true when we've received a subscription but haven't pushed yet. 
+ */ + synchronized boolean needsPushRequest() { + return state == State.PUSH_IN_PROGRESS && + configSubscriptionId != -1 && + !requestedConfigKeys.isEmpty(); + } +} From 09d02c2c75375f35e2905e2c8574ea4a840c0956 Mon Sep 17 00:00:00 2001 From: Kirk True Date: Tue, 24 Mar 2026 16:05:16 -0700 Subject: [PATCH 07/10] WIP for client --- .../org/apache/kafka/clients/CommonClientConfigs.java | 8 ++++++++ .../clients/consumer/internals/AsyncKafkaConsumer.java | 4 ++++ .../clients/consumer/internals/ClassicKafkaConsumer.java | 6 +++++- .../kafka/clients/consumer/internals/ConsumerUtils.java | 6 ++++-- .../clients/consumer/internals/NetworkClientDelegate.java | 4 +++- .../clients/consumer/internals/ShareConsumerImpl.java | 1 + 6 files changed, 25 insertions(+), 4 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index 468e11e0304e8..da1c173ade9e1 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -330,4 +330,12 @@ public static Optional telemetryReporter(String clientI telemetryReporter.configure(config.originals(Collections.singletonMap(CommonClientConfigs.CLIENT_ID_CONFIG, clientId))); return Optional.of(telemetryReporter); } + + public static Optional configsSender(AbstractConfig config) { + if (!config.getBoolean(CommonClientConfigs.ENABLE_CONFIGS_PUSH_CONFIG)) { + return Optional.empty(); + } + + return Optional.of(new DefaultClientConfigsSender(config)); + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java index 644613a8dee45..c0754e4cf2831 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java +++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java @@ -17,6 +17,7 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ApiVersions; +import org.apache.kafka.clients.ClientConfigsSender; import org.apache.kafka.clients.ClientUtils; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.GroupRebalanceConfig; @@ -324,6 +325,7 @@ private StreamsRebalanceListenerInvoker streamsRebalanceListenerInvoker() { private volatile boolean closed = false; // Init value is needed to avoid NPE in case of exception raised in the constructor private Optional clientTelemetryReporter = Optional.empty(); + private Optional clientConfigsSender = Optional.empty(); private final PositionsValidator positionsValidator; private AsyncPollEvent inflightPoll; @@ -399,6 +401,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, List reporters = CommonClientConfigs.metricsReporters(clientId, config); this.clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config); this.clientTelemetryReporter.ifPresent(reporters::add); + this.clientConfigsSender = CommonClientConfigs.configsSender(config); this.metrics = createMetrics(config, time, reporters); this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_METRIC_GROUP); this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); @@ -439,6 +442,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, metrics, fetchMetricsManager.throttleTimeSensor(), clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), + clientConfigsSender.orElse(null), backgroundEventHandler, false, asyncConsumerMetrics diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java index 5b54a759a9844..eeeb18d8ad4cc 100644 --- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java @@ -17,6 +17,7 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ApiVersions; +import org.apache.kafka.clients.ClientConfigsSender; import org.apache.kafka.clients.ClientUtils; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.GroupRebalanceConfig; @@ -143,6 +144,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { private final List assignors; // Init value is needed to avoid NPE in case of exception raised in the constructor private Optional clientTelemetryReporter = Optional.empty(); + private Optional clientConfigsSender = Optional.empty(); // currentThread holds the threadId of the current thread accessing this Consumer // and is used to prevent multi-threaded access @@ -175,6 +177,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { List reporters = CommonClientConfigs.metricsReporters(clientId, config); this.clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config); this.clientTelemetryReporter.ifPresent(reporters::add); + this.clientConfigsSender = CommonClientConfigs.configsSender(config); this.metrics = createMetrics(config, time, reporters); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.retryBackoffMaxMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); @@ -204,7 +207,8 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { metadata, fetchMetricsManager.throttleTimeSensor(), retryBackoffMs, - clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null)); + clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), + clientConfigsSender.orElse(null)); this.assignors = ConsumerPartitionAssignor.getAssignorInstances( 
config.getList(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG), diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java index c862f98e2d759..f6d2b8c88003d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java @@ -17,6 +17,7 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ApiVersions; +import org.apache.kafka.clients.ClientConfigsSender; import org.apache.kafka.clients.ClientUtils; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.GroupRebalanceConfig; @@ -87,7 +88,8 @@ public static ConsumerNetworkClient createConsumerNetworkClient(ConsumerConfig c Metadata metadata, Sensor throttleTimeSensor, long retryBackoffMs, - ClientTelemetrySender clientTelemetrySender) { + ClientTelemetrySender clientTelemetrySender, + ClientConfigsSender clientConfigsSender) { NetworkClient netClient = ClientUtils.createNetworkClient(config, metrics, CONSUMER_METRIC_GROUP_PREFIX, @@ -98,7 +100,7 @@ public static ConsumerNetworkClient createConsumerNetworkClient(ConsumerConfig c metadata, throttleTimeSensor, clientTelemetrySender, - null); + clientConfigsSender); // Will avoid blocking an extended period of time to prevent heartbeat thread starvation int heartbeatIntervalMs = config.getInt(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java index 2ac72d657f617..42d11e52c0df3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java +++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java @@ -17,6 +17,7 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ApiVersions; +import org.apache.kafka.clients.ClientConfigsSender; import org.apache.kafka.clients.ClientRequest; import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.ClientUtils; @@ -469,6 +470,7 @@ public static Supplier supplier(final Time time, final Metrics metrics, final Sensor throttleTimeSensor, final ClientTelemetrySender clientTelemetrySender, + final ClientConfigsSender clientConfigsSender, final BackgroundEventHandler backgroundEventHandler, final boolean notifyMetadataErrorsViaErrorQueue, final AsyncConsumerMetrics asyncConsumerMetrics) { @@ -485,7 +487,7 @@ protected NetworkClientDelegate create() { metadata, throttleTimeSensor, clientTelemetrySender, - null); + clientConfigsSender); return new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue, asyncConsumerMetrics); } }; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java index 3a7eb81df6062..457c95d9dcf9d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java @@ -291,6 +291,7 @@ private void process(final ErrorEvent event) { metrics, shareFetchMetricsManager.throttleTimeSensor(), clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), + null, backgroundEventHandler, true, asyncConsumerMetrics From 679dc226f480ac436200d18cf0960e444fc99859 Mon Sep 17 00:00:00 2001 From: Kirk True Date: Wed, 25 Mar 2026 11:16:46 -0700 Subject: [PATCH 08/10] WIP for client --- .../org/apache/kafka/clients/ClientUtils.java 
| 11 +- .../clients/DefaultClientConfigsSender.java | 18 ++- .../apache/kafka/clients/NetworkClient.java | 13 ++- .../kafka/clients/admin/KafkaAdminClient.java | 3 +- .../consumer/internals/ConsumerUtils.java | 3 +- .../internals/NetworkClientDelegate.java | 3 +- .../kafka/clients/producer/KafkaProducer.java | 3 +- .../common/network/ClientInformation.java | 18 ++- .../common/requests/ApiVersionsRequest.java | 21 +++- .../common/requests/PushConfigResponse.java | 16 ++- .../SaslServerAuthenticator.java | 3 +- .../server/policy/ClientConfigPolicy.java | 110 ++++++++++++++++++ .../common/message/ApiVersionsRequest.json | 8 +- .../GetConfigSubscriptionResponse.json | 2 +- .../common/message/PushConfigResponse.json | 14 +++ .../kafka/clients/NetworkClientTest.java | 8 +- .../kafka/common/network/SelectorTest.java | 2 +- .../scala/kafka/network/SocketServer.scala | 3 +- .../main/scala/kafka/server/KafkaApis.scala | 88 +++++++++++++- .../kafka/network/RequestChannelTest.scala | 2 +- .../network/RequestConvertToJsonTest.scala | 2 +- .../kafka/server/config/ServerLogConfigs.java | 9 ++ .../network/RequestConvertToJsonTest.java | 2 +- .../metrics/ClientMetricsTestUtils.java | 4 +- .../storage/internals/log/LogConfig.java | 2 + 25 files changed, 333 insertions(+), 35 deletions(-) create mode 100644 clients/src/main/java/org/apache/kafka/server/policy/ClientConfigPolicy.java diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index 281dbbb3c1135..49d9a0555b6e2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -160,7 +160,8 @@ public static NetworkClient createNetworkClient(AbstractConfig config, Metadata metadata, Sensor throttleTimeSensor, ClientTelemetrySender clientTelemetrySender, - ClientConfigsSender clientConfigsSender) { + ClientConfigsSender clientConfigsSender, + String 
clientSoftwareRole) { return createNetworkClient(config, config.getString(CommonClientConfigs.CLIENT_ID_CONFIG), metrics, @@ -175,7 +176,8 @@ public static NetworkClient createNetworkClient(AbstractConfig config, new DefaultHostResolver(), throttleTimeSensor, clientTelemetrySender, - clientConfigsSender); + clientConfigsSender, + clientSoftwareRole); } public static NetworkClient createNetworkClient(AbstractConfig config, @@ -203,6 +205,7 @@ public static NetworkClient createNetworkClient(AbstractConfig config, hostResolver, null, null, + null, null); } @@ -220,7 +223,8 @@ public static NetworkClient createNetworkClient(AbstractConfig config, HostResolver hostResolver, Sensor throttleTimeSensor, ClientTelemetrySender clientTelemetrySender, - ClientConfigsSender clientConfigsSender) { + ClientConfigsSender clientConfigsSender, + String clientSoftwareRole) { ChannelBuilder channelBuilder = null; Selector selector = null; @@ -252,6 +256,7 @@ public static NetworkClient createNetworkClient(AbstractConfig config, hostResolver, clientTelemetrySender, clientConfigsSender, + clientSoftwareRole, config.getLong(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG), MetadataRecoveryStrategy.forName(config.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)) ); diff --git a/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java b/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java index e128af2691289..54f52ab582a48 100644 --- a/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java +++ b/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java @@ -22,6 +22,7 @@ import org.apache.kafka.common.message.GetConfigSubscriptionRequestData; import org.apache.kafka.common.message.GetConfigSubscriptionResponseData; import org.apache.kafka.common.message.PushConfigRequestData; +import org.apache.kafka.common.message.PushConfigResponseData; import 
org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.requests.GetConfigSubscriptionRequest; @@ -136,7 +137,7 @@ public synchronized void handleResponse(GetConfigSubscriptionResponse response) configMaxBytes = data.configMaxBytes(); // Extract requested keys - requestedConfigKeys = data.requestedKeys() + requestedConfigKeys = data.configNames() .stream() .map(key -> key.name()) .collect(Collectors.toList()); @@ -161,6 +162,21 @@ public synchronized void handleResponse(PushConfigResponse response) { log.info("Configuration push completed successfully"); state = State.COMPLETED; + } else if (error == Errors.INVALID_CONFIG) { + // Log per-config errors from the new ConfigErrors array + if (response.hasConfigErrors()) { + log.error("Configuration push failed with {} invalid config(s):", + response.configErrors().size()); + for (PushConfigResponseData.ConfigError configError : response.configErrors()) { + log.error(" Config '{}': {}", + configError.configKey(), + configError.configErrorDescription()); + } + } else { + log.error("Configuration push failed: INVALID_CONFIG (no details provided)"); + } + state = State.FAILED; + } else if (error == Errors.UNKNOWN_CONFIG_SUBSCRIPTION_ID) { log.warn("Subscription changed, retrying GetConfigSubscription"); // Reset to retry once diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index f7401118b2bb6..0b9a00bc7e8f3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -108,6 +108,9 @@ private enum State { /* the client id used to identify this client in requests to the server */ private final String clientId; + /* the client software role used in ApiVersionsRequest */ + private final String clientSoftwareRole; + /* the current correlation id to use when sending 
requests to servers */ private int correlation; @@ -178,6 +181,7 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, null, + null, Long.MAX_VALUE, metadataRecoveryStrategy); } @@ -219,6 +223,7 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, null, + null, rebootstrapTriggerMs, metadataRecoveryStrategy); } @@ -260,6 +265,7 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, null, + null, Long.MAX_VALUE, metadataRecoveryStrategy); } @@ -300,6 +306,7 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, null, + null, Long.MAX_VALUE, metadataRecoveryStrategy); } @@ -324,6 +331,7 @@ public NetworkClient(MetadataUpdater metadataUpdater, HostResolver hostResolver, ClientTelemetrySender clientTelemetrySender, ClientConfigsSender clientConfigsSender, + String clientSoftwareRole, long rebootstrapTriggerMs, MetadataRecoveryStrategy metadataRecoveryStrategy) { /* It would be better if we could pass `DefaultMetadataUpdater` from the public constructor, but it's not @@ -339,6 +347,7 @@ public NetworkClient(MetadataUpdater metadataUpdater, } this.selector = selector; this.clientId = clientId; + this.clientSoftwareRole = clientSoftwareRole; this.inFlightRequests = new InFlightRequests(maxInFlightRequestsPerConnection); this.connectionStates = new ClusterConnectionStates( reconnectBackoffMs, reconnectBackoffMax, @@ -1061,7 +1070,7 @@ private void handleApiVersionsResponse(List responses, maxApiVersion = apiVersion.maxVersion(); } } - nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder(maxApiVersion)); + nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder(maxApiVersion).withRole(this.clientSoftwareRole)); } return; } @@ -1107,7 +1116,7 @@ private void handleConnections() { // Therefore, it is still necessary to check isChannelReady before attempting to send on this // connection. 
if (discoverBrokerVersions) { - nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder()); + nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder().withRole(this.clientSoftwareRole)); log.debug("Completed connection to node {}. Fetching API versions.", node); } else { this.connectionStates.ready(node); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java index 5f8b57035d1c5..3a45df086638d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java @@ -564,7 +564,8 @@ static KafkaAdminClient createInternal( (hostResolver == null) ? new DefaultHostResolver() : hostResolver, null, clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), - null); + null, + "admin"); return new KafkaAdminClient(config, clientId, time, metadataManager, metrics, networkClient, timeoutProcessorFactory, logContext, clientTelemetryReporter); } catch (Throwable exc) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java index f6d2b8c88003d..ec7a93b70c1cc 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java @@ -100,7 +100,8 @@ public static ConsumerNetworkClient createConsumerNetworkClient(ConsumerConfig c metadata, throttleTimeSensor, clientTelemetrySender, - clientConfigsSender); + clientConfigsSender, + "consumer"); // Will avoid blocking an extended period of time to prevent heartbeat thread starvation int heartbeatIntervalMs = config.getInt(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG); diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java index 42d11e52c0df3..c85db5561479a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java @@ -487,7 +487,8 @@ protected NetworkClientDelegate create() { metadata, throttleTimeSensor, clientTelemetrySender, - clientConfigsSender); + clientConfigsSender, + "consumer"); return new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue, asyncConsumerMetrics); } }; diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index a1a30b34847e0..9a9d33fab98cb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -537,7 +537,8 @@ Sender newSender(LogContext logContext, KafkaClient kafkaClient, ProducerMetadat metadata, throttleTimeSensor, clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), - null); + null, + "producer"); short acks = Short.parseShort(producerConfig.getString(ProducerConfig.ACKS_CONFIG)); return new Sender(logContext, diff --git a/clients/src/main/java/org/apache/kafka/common/network/ClientInformation.java b/clients/src/main/java/org/apache/kafka/common/network/ClientInformation.java index cb99a8669e40f..de9feff496da2 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ClientInformation.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ClientInformation.java @@ -21,14 +21,16 @@ public class ClientInformation { public static final String UNKNOWN_NAME_OR_VERSION = "unknown"; - 
public static final ClientInformation EMPTY = new ClientInformation(UNKNOWN_NAME_OR_VERSION, UNKNOWN_NAME_OR_VERSION); + public static final ClientInformation EMPTY = new ClientInformation(UNKNOWN_NAME_OR_VERSION, UNKNOWN_NAME_OR_VERSION, UNKNOWN_NAME_OR_VERSION); private final String softwareName; private final String softwareVersion; + private final String softwareRole; - public ClientInformation(String softwareName, String softwareVersion) { + public ClientInformation(String softwareName, String softwareVersion, String softwareRole) { this.softwareName = softwareName.isEmpty() ? UNKNOWN_NAME_OR_VERSION : softwareName; this.softwareVersion = softwareVersion.isEmpty() ? UNKNOWN_NAME_OR_VERSION : softwareVersion; + this.softwareRole = (softwareRole == null || softwareRole.isEmpty()) ? UNKNOWN_NAME_OR_VERSION : softwareRole; } public String softwareName() { @@ -39,15 +41,20 @@ public String softwareVersion() { return this.softwareVersion; } + public String softwareRole() { + return this.softwareRole; + } + @Override public String toString() { return "ClientInformation(softwareName=" + softwareName + - ", softwareVersion=" + softwareVersion + ")"; + ", softwareVersion=" + softwareVersion + + ", softwareRole=" + softwareRole + ")"; } @Override public int hashCode() { - return Objects.hash(softwareName, softwareVersion); + return Objects.hash(softwareName, softwareVersion, softwareRole); } @Override @@ -60,6 +67,7 @@ public boolean equals(Object o) { } ClientInformation other = (ClientInformation) o; return other.softwareName.equals(softwareName) && - other.softwareVersion.equals(softwareVersion); + other.softwareVersion.equals(softwareVersion) && + other.softwareRole.equals(softwareRole); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java index 1bdb0903c7d7d..6c17cc9fd33ba 100644 --- 
a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java @@ -33,7 +33,8 @@ public static class Builder extends AbstractRequest.Builder private static final ApiVersionsRequestData DEFAULT_DATA = new ApiVersionsRequestData() .setClientSoftwareName(DEFAULT_CLIENT_SOFTWARE_NAME) - .setClientSoftwareVersion(AppInfoParser.getVersion()); + .setClientSoftwareVersion(AppInfoParser.getVersion()) + .setClientSoftwareRole(null); private final ApiVersionsRequestData data; @@ -56,6 +57,11 @@ public Builder( this.data = data.duplicate(); } + public Builder withRole(String role) { + this.data.setClientSoftwareRole(role); + return this; + } + @Override public ApiVersionsRequest build(short version) { return new ApiVersionsRequest(data, version); @@ -95,8 +101,17 @@ public boolean hasUnsupportedRequestVersion() { public boolean isValid() { if (version() >= 3) { - return SOFTWARE_NAME_VERSION_PATTERN.matcher(data.clientSoftwareName()).matches() && - SOFTWARE_NAME_VERSION_PATTERN.matcher(data.clientSoftwareVersion()).matches(); + boolean nameValid = SOFTWARE_NAME_VERSION_PATTERN.matcher(data.clientSoftwareName()).matches(); + boolean versionValid = SOFTWARE_NAME_VERSION_PATTERN.matcher(data.clientSoftwareVersion()).matches(); + + if (version() >= 5) { + // For v5+, also validate role if present + String role = data.clientSoftwareRole(); + boolean roleValid = role == null || role.isEmpty() || + SOFTWARE_NAME_VERSION_PATTERN.matcher(role).matches(); + return nameValid && versionValid && roleValid; + } + return nameValid && versionValid; } else { return true; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java index 379e5c0c17fa7..9ae47c319a39c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java +++ 
b/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java @@ -27,7 +27,7 @@ /** * Possible error codes: * - {@link Errors#CONFIG_TOO_LARGE} - * - {@link Errors#INVALID_CONFIG} + * - {@link Errors#INVALID_CONFIG} - Check configErrors() for per-config details * - {@link Errors#UNKNOWN_CONFIG_SUBSCRIPTION_ID} * - {@link Errors#UNSUPPORTED_VERSION} * - {@link Errors#INVALID_REQUEST} @@ -70,6 +70,20 @@ public Errors error() { return Errors.forCode(data.errorCode()); } + /** + * Returns true if there are per-config validation errors. + */ + public boolean hasConfigErrors() { + return !data.configErrors().isEmpty(); + } + + /** + * Returns the list of per-config errors, populated when ErrorCode is INVALID_CONFIG. + */ + public java.util.List configErrors() { + return data.configErrors(); + } + public static PushConfigResponse parse(Readable readable, short version) { return new PushConfigResponse(new PushConfigResponseData(readable, version)); } diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java index b84b5dc2abc94..b48375c7d7c63 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java @@ -579,7 +579,8 @@ else if (!apiVersionsRequest.isValid()) sendKafkaResponse(context, apiVersionsRequest.getErrorResponse(0, Errors.INVALID_REQUEST.exception())); else { metadataRegistry.registerClientInformation(new ClientInformation(apiVersionsRequest.data().clientSoftwareName(), - apiVersionsRequest.data().clientSoftwareVersion())); + apiVersionsRequest.data().clientSoftwareVersion(), + apiVersionsRequest.data().clientSoftwareRole())); sendKafkaResponse(context, apiVersionSupplier.apply(apiVersionsRequest.version())); 
setSaslState(SaslState.HANDSHAKE_REQUEST); } diff --git a/clients/src/main/java/org/apache/kafka/server/policy/ClientConfigPolicy.java b/clients/src/main/java/org/apache/kafka/server/policy/ClientConfigPolicy.java new file mode 100644 index 0000000000000..0e42a5e199ab5 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/server/policy/ClientConfigPolicy.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.server.policy; + +import org.apache.kafka.common.Configurable; +import org.apache.kafka.common.annotation.InterfaceStability; +import org.apache.kafka.common.errors.PolicyViolationException; +import org.apache.kafka.common.network.ClientInformation; + +import java.util.Map; +import java.util.Set; + +/** + * An interface for enforcing client configuration policies. + * + *
<p>
Common use cases are verifying that client configurations match expected values + * or fall within acceptable ranges for a given client profile (name, version, role). + * + *
<p>
If client.config.policy.class.name is defined, Kafka will create an instance of the specified class + * using the default constructor and will then pass the broker configs to its configure() method. + * During broker shutdown, the close() method will be invoked so that resources can be released (if + * necessary). + */ +@InterfaceStability.Evolving +public interface ClientConfigPolicy extends Configurable, AutoCloseable { + + /** + * Metadata provided for GetConfigSubscription requests. + */ + class GetConfigSubscriptionRequestMetadata { + private final ClientInformation clientInformation; + + public GetConfigSubscriptionRequestMetadata(ClientInformation clientInformation) { + this.clientInformation = clientInformation; + } + + /** + * Return the client information from the ApiVersionsRequest. + */ + public ClientInformation clientInformation() { + return clientInformation; + } + } + + /** + * Metadata provided for PushConfig requests. + */ + class PushConfigRequestMetadata { + private final ClientInformation clientInformation; + private final Map configs; + + public PushConfigRequestMetadata(ClientInformation clientInformation, + Map configs) { + this.clientInformation = clientInformation; + this.configs = configs; + } + + /** + * Return the client information from the ApiVersionsRequest. + */ + public ClientInformation clientInformation() { + return clientInformation; + } + + /** + * Return the configuration key-value pairs being pushed by the client. + */ + public Map configs() { + return configs; + } + } + + /** + * Select which configuration keys the client should send in a subsequent PushConfig request. + * + * @param metadata the GetConfigSubscription request metadata + * @return the set of configuration keys to request, or null/empty set if no configs should be requested + */ + Set configKeysToRequest(GetConfigSubscriptionRequestMetadata metadata); + + /** + * Validate the pushed client configurations. 
+ * + * @param metadata the PushConfig request metadata + * @throws PolicyViolationException if the configurations violate the policy + */ + void validate(PushConfigRequestMetadata metadata) throws PolicyViolationException; + + /** + * Close this policy instance. Default implementation is a no-op. + */ + @Override + default void close() throws Exception { + // Default no-op + } +} diff --git a/clients/src/main/resources/common/message/ApiVersionsRequest.json b/clients/src/main/resources/common/message/ApiVersionsRequest.json index 56170c9667350..10145826b2663 100644 --- a/clients/src/main/resources/common/message/ApiVersionsRequest.json +++ b/clients/src/main/resources/common/message/ApiVersionsRequest.json @@ -23,12 +23,16 @@ // Version 3 is the first flexible version and adds ClientSoftwareName and ClientSoftwareVersion. // // Version 4 fixes KAFKA-17011, which blocked SupportedFeatures.MinVersion in the response from being 0. - "validVersions": "0-4", + // + // Version 5 adds ClientSoftwareRole. + "validVersions": "0-5", "flexibleVersions": "3+", "fields": [ { "name": "ClientSoftwareName", "type": "string", "versions": "3+", "ignorable": true, "about": "The name of the client." }, { "name": "ClientSoftwareVersion", "type": "string", "versions": "3+", - "ignorable": true, "about": "The version of the client." } + "ignorable": true, "about": "The version of the client." }, + { "name": "ClientSoftwareRole", "type": "string", "versions": "5+", + "ignorable": true, "about": "The role of the client (e.g., producer, consumer, admin)." 
} ] } diff --git a/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json b/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json index 29e55feb65fa2..287d8fe88c63b 100644 --- a/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json +++ b/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json @@ -41,7 +41,7 @@ "about": "The maximum bytes of config data the broker accepts in PushConfigRequest." }, { - "name": "RequestedKeys", "type": "[]ConfigKey", "versions": "0+", + "name": "ConfigNames", "type": "[]ConfigKey", "versions": "0+", "about": "The config keys the broker wants to receive.", "fields": [ { diff --git a/clients/src/main/resources/common/message/PushConfigResponse.json b/clients/src/main/resources/common/message/PushConfigResponse.json index 0b9c7f1d834b6..c64a8b9f8c3cf 100644 --- a/clients/src/main/resources/common/message/PushConfigResponse.json +++ b/clients/src/main/resources/common/message/PushConfigResponse.json @@ -27,6 +27,20 @@ { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The error code, or 0 if there was no error." + }, + { + "name": "ConfigErrors", "type": "[]ConfigError", "versions": "0+", + "about": "Per-config error details, populated when ErrorCode is INVALID_CONFIG.", + "fields": [ + { + "name": "ConfigKey", "type": "string", "versions": "0+", + "about": "The configuration key that caused the error." + }, + { + "name": "ConfigErrorDescription", "type": "string", "versions": "0+", "nullableVersions": "0+", + "about": "Description of the configuration error, or null if no error." 
+ } + ] } ] } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java index 8927d83f48ab2..bf13435cf0a4f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java @@ -1151,7 +1151,7 @@ public void testReconnectAfterAddressChange() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, + time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, null, Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Connect to one the initial addresses, then change the addresses and disconnect @@ -1212,7 +1212,7 @@ public void testFailedConnectionToFirstAddress() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, + time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, null, Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // First connection attempt should fail @@ -1265,7 +1265,7 @@ public void testFailedConnectionToFirstAddressAfterReconnect() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, 
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, + time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, null, Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Connect to one the initial addresses, then change the addresses and disconnect @@ -1374,7 +1374,7 @@ public void testTelemetryRequest() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, true, new ApiVersions(), null, new LogContext(), new DefaultHostResolver(), mockClientTelemetrySender, null, + time, true, new ApiVersions(), null, new LogContext(), new DefaultHostResolver(), mockClientTelemetrySender, null, null, Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Send the ApiVersionsRequest diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index 347f76135866d..865f4e6a6ca84 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -865,7 +865,7 @@ public void testConnectionsByClientMetric() throws Exception { selector.channel(node).channelMetadataRegistry().clientInformation()); // Metric with unknown / unknown should not be there, metric with A / B should be there - ClientInformation clientInformation = new ClientInformation("A", "B"); + ClientInformation clientInformation = new ClientInformation("A", "B", null); selector.channel(node).channelMetadataRegistry() .registerClientInformation(clientInformation); assertEquals(clientInformation, diff --git 
a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index ebc0f990ce1fe..4ad2681de8f2d 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -1031,7 +1031,8 @@ private[kafka] class Processor( if (apiVersionsRequest.isValid) { channel.channelMetadataRegistry.registerClientInformation(new ClientInformation( apiVersionsRequest.data.clientSoftwareName, - apiVersionsRequest.data.clientSoftwareVersion)) + apiVersionsRequest.data.clientSoftwareVersion, + apiVersionsRequest.data.clientSoftwareRole)) } } requestChannel.sendRequest(req) diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 0065986438470..a59bae567b013 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -64,6 +64,9 @@ import org.apache.kafka.security.DelegationTokenManager import org.apache.kafka.server.{ApiVersionManager, ClientMetricsManager, FetchManager, ProcessRole} import org.apache.kafka.server.authorizer._ import org.apache.kafka.server.common.{GroupVersion, RequestLocal, ShareVersion, StreamsVersion, TransactionVersion} +import org.apache.kafka.server.config.ServerLogConfigs +import org.apache.kafka.server.policy.ClientConfigPolicy +import org.apache.kafka.common.errors.PolicyViolationException import org.apache.kafka.server.share.context.ShareFetchContext import org.apache.kafka.server.share.{ErroneousAndValidPartitionData, SharePartitionKey} import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch @@ -72,11 +75,12 @@ import org.apache.kafka.server.transaction.AddPartitionsToTxnManager import org.apache.kafka.storage.internals.log.{AppendOrigin, RecordValidationStats} import org.apache.kafka.storage.log.metrics.BrokerTopicStats +import java.nio.charset.StandardCharsets import java.util import 
java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.{CompletableFuture, ConcurrentHashMap} import java.util.stream.Collectors -import java.util.{Collections, Optional} +import java.util.{Collections, Optional, UUID} import scala.annotation.nowarn import scala.collection.mutable.ArrayBuffer import scala.collection.{Map, Seq, Set, mutable} @@ -120,6 +124,12 @@ class KafkaApis(val requestChannel: RequestChannel, val configManager = new ConfigAdminManager(brokerId, config, configRepository) val describeTopicPartitionsRequestHandler = new DescribeTopicPartitionsRequestHandler( metadataCache, authHelper, config) + val clientConfigPolicy: Option[ClientConfigPolicy] = + Option(config.getConfiguredInstance( + ServerLogConfigs.CLIENT_CONFIG_POLICY_CLASS_NAME_CONFIG, + classOf[ClientConfigPolicy])) + val clientConfigMaxBytes: Int = + config.getInt(ServerLogConfigs.CLIENT_CONFIG_MAX_BYTES_CONFIG) def close(): Unit = { aclApis.close() @@ -225,7 +235,9 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.CONSUMER_GROUP_HEARTBEAT => handleConsumerGroupHeartbeat(request).exceptionally(handleError) case ApiKeys.CONSUMER_GROUP_DESCRIBE => handleConsumerGroupDescribe(request).exceptionally(handleError) case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => handleDescribeTopicPartitionsRequest(request) + case ApiKeys.GET_CONFIG_SUBSCRIPTION => handleGetConfigSubscriptionRequest(request) case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => handleGetTelemetrySubscriptionsRequest(request) + case ApiKeys.PUSH_CONFIG => handlePushConfigRequest(request) case ApiKeys.PUSH_TELEMETRY => handlePushTelemetryRequest(request) case ApiKeys.LIST_CONFIG_RESOURCES => handleListConfigResources(request) case ApiKeys.ADD_RAFT_VOTER => forwardToController(request) @@ -2977,6 +2989,80 @@ class KafkaApis(val requestChannel: RequestChannel, } } + def handleGetConfigSubscriptionRequest(request: RequestChannel.Request): Unit = { + authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) + + val 
clientInfo = request.context.clientInformation + val configKeys: Set[String] = clientConfigPolicy match { + case Some(policy) => + val metadata = new ClientConfigPolicy.GetConfigSubscriptionRequestMetadata(clientInfo) + val keys = policy.configKeysToRequest(metadata) + if (keys == null) Set.empty else keys.asScala.toSet + case None => + Set.empty + } + + val subscriptionId = math.abs(UUID.randomUUID().hashCode()) + val configNamesList = configKeys.map { name => + new GetConfigSubscriptionResponseData.ConfigKey().setName(name) + }.toList.asJava + + val responseData = new GetConfigSubscriptionResponseData() + .setSubscriptionId(subscriptionId) + .setConfigMaxBytes(clientConfigMaxBytes) + .setConfigNames(configNamesList) + + requestHelper.sendResponseMaybeThrottle(request, + requestThrottleMs => new GetConfigSubscriptionResponse(responseData)) + } + + def handlePushConfigRequest(request: RequestChannel.Request): Unit = { + authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) + + val clientInfo = request.context.clientInformation + val configsData = request.body[PushConfigRequest].data.configs.asScala + val configs = configsData.map(c => c.name -> c.value).toMap + + // Calculate size + val configsSize = configs.foldLeft(0)((acc, kv) => + acc + kv._1.getBytes(StandardCharsets.UTF_8).length + + kv._2.getBytes(StandardCharsets.UTF_8).length) + + val responseData = if (configsSize > clientConfigMaxBytes) { + new PushConfigResponseData() + .setErrorCode(Errors.CONFIG_TOO_LARGE.code) + } else { + clientConfigPolicy match { + case Some(policy) => + try { + val metadata = new ClientConfigPolicy.PushConfigRequestMetadata(clientInfo, configs.asJava) + policy.validate(metadata) + new PushConfigResponseData() + .setErrorCode(Errors.NONE.code) + } catch { + case e: PolicyViolationException => + // Parse validation errors into per-config errors + val configErrors = new util.ArrayList[PushConfigResponseData.ConfigError]() + // For now, single error - future: parse 
e.getMessage() for per-config details + configErrors.add(new PushConfigResponseData.ConfigError() + .setConfigKey("") + .setConfigErrorDescription(e.getMessage)) + + new PushConfigResponseData() + .setErrorCode(Errors.INVALID_CONFIG.code) + .setConfigErrors(configErrors) + } + case None => + // No policy configured, accept all + new PushConfigResponseData() + .setErrorCode(Errors.NONE.code) + } + } + + requestHelper.sendResponseMaybeThrottle(request, + requestThrottleMs => new PushConfigResponse(responseData)) + } + /** * Handle ListConfigResourcesRequest. If resourceTypes are not specified, it uses ListConfigResourcesRequest#supportedResourceTypes * to retrieve config resources. If resourceTypes are specified, it returns matched config resources. diff --git a/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala b/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala index df10430ad7246..43ee4b3e4472d 100644 --- a/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala +++ b/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala @@ -307,7 +307,7 @@ class RequestChannelTest { new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "user"), ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), SecurityProtocol.PLAINTEXT, - new ClientInformation("name", "version"), + new ClientInformation("name", "version", null), false) } diff --git a/core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala b/core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala index 400e13dc6bef7..90ed44eba617a 100644 --- a/core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala +++ b/core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala @@ -128,7 +128,7 @@ class RequestConvertToJsonTest { new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "user"), ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), SecurityProtocol.PLAINTEXT, - new ClientInformation("name", "version"), + new ClientInformation("name", 
"version", null), false) } } diff --git a/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java b/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java index d8ffd8a5e2f1d..a48c0abe07a94 100644 --- a/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java +++ b/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java @@ -164,6 +164,15 @@ public class ServerLogConfigs { "implement the org.apache.kafka.server.policy.AlterConfigPolicy interface. " + "
<p>
Note: This policy runs on the controller instead of the broker.
</p>
"; + public static final String CLIENT_CONFIG_POLICY_CLASS_NAME_CONFIG = "client.config.policy.class.name"; + public static final String CLIENT_CONFIG_POLICY_CLASS_NAME_DOC = "The client configuration policy class that should be used for validating client configurations. " + + "The class should implement the org.apache.kafka.server.policy.ClientConfigPolicy interface."; + + public static final String CLIENT_CONFIG_MAX_BYTES_CONFIG = "client.config.max.bytes"; + public static final int CLIENT_CONFIG_MAX_BYTES_DEFAULT = 10240; // 10KB + public static final String CLIENT_CONFIG_MAX_BYTES_DOC = "Maximum size in bytes for client configuration data in PushConfig requests. " + + "Requests exceeding this limit will be rejected with CONFIG_TOO_LARGE error."; + public static final String LOG_INITIAL_TASK_DELAY_MS_CONFIG = LOG_PREFIX + "initial.task.delay.ms"; public static final long LOG_INITIAL_TASK_DELAY_MS_DEFAULT = 30 * 1000L; public static final String LOG_INITIAL_TASK_DELAY_MS_DOC = "The initial task delay in millisecond when initializing " + diff --git a/server/src/test/java/org/apache/kafka/network/RequestConvertToJsonTest.java b/server/src/test/java/org/apache/kafka/network/RequestConvertToJsonTest.java index be0e4337c1413..42cc5b6fc5417 100644 --- a/server/src/test/java/org/apache/kafka/network/RequestConvertToJsonTest.java +++ b/server/src/test/java/org/apache/kafka/network/RequestConvertToJsonTest.java @@ -120,7 +120,7 @@ public void testAllResponseTypesHandled() { @Test public void testClientInfoNode() { - ClientInformation clientInfo = new ClientInformation("name", "1"); + ClientInformation clientInfo = new ClientInformation("name", "1", null); ObjectNode expectedNode = JsonNodeFactory.instance.objectNode(); expectedNode.set("softwareName", new TextNode(clientInfo.softwareName())); expectedNode.set("softwareVersion", new TextNode(clientInfo.softwareVersion())); diff --git a/server/src/test/java/org/apache/kafka/server/metrics/ClientMetricsTestUtils.java 
b/server/src/test/java/org/apache/kafka/server/metrics/ClientMetricsTestUtils.java index 8e8bb21b7da81..419ac130f0add 100644 --- a/server/src/test/java/org/apache/kafka/server/metrics/ClientMetricsTestUtils.java +++ b/server/src/test/java/org/apache/kafka/server/metrics/ClientMetricsTestUtils.java @@ -68,7 +68,7 @@ public static RequestContext requestContext() throws UnknownHostException { KafkaPrincipal.ANONYMOUS, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), SecurityProtocol.PLAINTEXT, - new ClientInformation("apache-kafka-java", "3.5.2"), + new ClientInformation("apache-kafka-java", "3.5.2", null), false); } @@ -94,7 +94,7 @@ public static RequestContext requestContextWithConnectionId(String connectionId) KafkaPrincipal.ANONYMOUS, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), SecurityProtocol.PLAINTEXT, - new ClientInformation("apache-kafka-java", "3.5.2"), + new ClientInformation("apache-kafka-java", "3.5.2", null), false); } diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogConfig.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogConfig.java index f81d224e7ea7f..8f8dd9248e961 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogConfig.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogConfig.java @@ -179,6 +179,8 @@ public Optional serverConfigName(String configName) { .define(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, LONG, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_DEFAULT, atLeast(0), MEDIUM, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_DOC) .define(ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG, CLASS, null, LOW, ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_DOC) .define(ServerLogConfigs.ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG, CLASS, null, LOW, ServerLogConfigs.ALTER_CONFIG_POLICY_CLASS_NAME_DOC) + .define(ServerLogConfigs.CLIENT_CONFIG_POLICY_CLASS_NAME_CONFIG, CLASS, null, LOW, 
ServerLogConfigs.CLIENT_CONFIG_POLICY_CLASS_NAME_DOC) + .define(ServerLogConfigs.CLIENT_CONFIG_MAX_BYTES_CONFIG, INT, ServerLogConfigs.CLIENT_CONFIG_MAX_BYTES_DEFAULT, atLeast(0), LOW, ServerLogConfigs.CLIENT_CONFIG_MAX_BYTES_DOC) .define(ServerLogConfigs.LOG_DIR_FAILURE_TIMEOUT_MS_CONFIG, LONG, ServerLogConfigs.LOG_DIR_FAILURE_TIMEOUT_MS_DEFAULT, atLeast(1), LOW, ServerLogConfigs.LOG_DIR_FAILURE_TIMEOUT_MS_DOC) .defineInternal(ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_CONFIG, LONG, ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_DEFAULT, atLeast(0), LOW, ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_DOC); From 397dc37cf7ea4fa330eee43097d5754431012307 Mon Sep 17 00:00:00 2001 From: Kirk True Date: Fri, 27 Mar 2026 09:33:36 -0700 Subject: [PATCH 09/10] WIP for client --- .../ClientConfigPushIntegrationTest.java | 332 ++++++++++++++++++ .../kafka/clients/ClientsTestUtils.java | 2 + .../kafka/clients/ClientConfigsSender.java | 23 +- .../org/apache/kafka/clients/ClientUtils.java | 11 +- .../apache/kafka/clients/ConfigCollector.java | 29 +- .../clients/DefaultClientConfigsSender.java | 111 +++--- .../apache/kafka/clients/NetworkClient.java | 27 +- .../kafka/clients/admin/KafkaAdminClient.java | 3 +- .../consumer/internals/ConsumerUtils.java | 3 +- .../internals/NetworkClientDelegate.java | 3 +- .../kafka/clients/producer/KafkaProducer.java | 3 +- ....java => ClientConfigPolicyException.java} | 13 +- .../errors/UnknownConfigProfileException.java | 41 +++ .../apache/kafka/common/protocol/ApiKeys.java | 2 +- .../apache/kafka/common/protocol/Errors.java | 4 +- .../common/requests/AbstractResponse.java | 4 +- .../common/requests/ApiVersionsRequest.java | 16 +- .../requests/GetConfigProfileKeysRequest.java | 74 ++++ ...java => GetConfigProfileKeysResponse.java} | 18 +- .../GetConfigSubscriptionRequest.java | 78 ---- .../common/requests/PushConfigResponse.java | 19 +- .../SaslServerAuthenticator.java | 2 +- .../server/policy/ClientConfigPolicy.java | 123 +++---- 
.../policy/ClientConfigProfileKeys.java | 74 ++++ .../kafka/server/policy/ClientProfile.java | 99 ++++++ .../server/policy/ClientPushConfigData.java | 85 +++++ .../common/message/ApiVersionsRequest.json | 15 +- ....json => GetConfigProfileKeysRequest.json} | 9 +- ...json => GetConfigProfileKeysResponse.json} | 26 +- .../common/message/PushConfigRequest.json | 22 +- .../common/message/PushConfigResponse.json | 16 +- .../kafka/clients/NetworkClientTest.java | 8 +- .../scala/kafka/network/SocketServer.scala | 2 +- .../main/scala/kafka/server/KafkaApis.scala | 121 ++++--- .../kafka/server/config/ServerLogConfigs.java | 2 +- 35 files changed, 1012 insertions(+), 408 deletions(-) create mode 100644 clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientConfigPushIntegrationTest.java rename clients/src/main/java/org/apache/kafka/common/errors/{UnknownConfigSubscriptionIdException.java => ClientConfigPolicyException.java} (64%) create mode 100644 clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigProfileException.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/GetConfigProfileKeysRequest.java rename clients/src/main/java/org/apache/kafka/common/requests/{GetConfigSubscriptionResponse.java => GetConfigProfileKeysResponse.java} (74%) delete mode 100644 clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java create mode 100644 clients/src/main/java/org/apache/kafka/server/policy/ClientConfigProfileKeys.java create mode 100644 clients/src/main/java/org/apache/kafka/server/policy/ClientProfile.java create mode 100644 clients/src/main/java/org/apache/kafka/server/policy/ClientPushConfigData.java rename clients/src/main/resources/common/message/{GetConfigSubscriptionRequest.json => GetConfigProfileKeysRequest.json} (79%) rename clients/src/main/resources/common/message/{GetConfigSubscriptionResponse.json => GetConfigProfileKeysResponse.json} (59%) diff --git 
a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientConfigPushIntegrationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientConfigPushIntegrationTest.java new file mode 100644 index 0000000000000..5e344d3723040 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientConfigPushIntegrationTest.java @@ -0,0 +1,332 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.clients; + +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.ClientConfigPolicyException; +import org.apache.kafka.common.errors.UnknownConfigProfileException; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.server.config.ServerLogConfigs; +import org.apache.kafka.server.policy.ClientConfigPolicy; +import org.apache.kafka.server.policy.ClientConfigProfileKeys; +import org.apache.kafka.server.policy.ClientProfile; +import org.apache.kafka.server.policy.ClientPushConfigData; + +import org.junit.jupiter.api.BeforeEach; + +import java.util.Collections; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Integration tests for the client configuration push handshake (KIP-CFG). + *

+ * Tests the end-to-end flow: + * 1. Client sends GetConfigProfileKeys request + * 2. Broker responds with profile keys and CRC + * 3. Client sends PushConfig request with collected configs + * 4. Broker validates and processes the config + */ +@ClusterTestDefaults( + brokers = 1, + serverProperties = { + @ClusterConfigProperty(key = ServerLogConfigs.CLIENT_CONFIG_POLICY_CLASS_NAME_CONFIG, + value = "org.apache.kafka.clients.ClientConfigPushIntegrationTest$TestClientConfigPolicy"), + @ClusterConfigProperty(key = ServerLogConfigs.CLIENT_CONFIG_MAX_BYTES_CONFIG, value = "1048576") + } +) +public class ClientConfigPushIntegrationTest { + + private final ClusterInstance clusterInstance; + private TestClientConfigPolicy clientConfigPolicy; + + public ClientConfigPushIntegrationTest(ClusterInstance clusterInstance) { + this.clusterInstance = clusterInstance; + } + + @BeforeEach + public void setup() throws InterruptedException { + clusterInstance.waitForReadyBrokers(); + // Reset test state + receivedProfiles.clear(); + receivedConfigs.clear(); + profileKeysCallCount.set(0); + processCallCount.set(0); + } + + @ClusterTest + public void testProducerConfigPushHandshake() throws Exception { + Map producerConfig = new java.util.HashMap<>(); + producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfig.put(ProducerConfig.CLIENT_ID_CONFIG, "test-producer-config-push"); + producerConfig.put(CommonClientConfigs.ENABLE_CONFIGS_PUSH_CONFIG, true); + + try (Producer producer = new KafkaProducer<>(producerConfig)) { + // Give time for handshake to complete + Thread.sleep(2000); + + // Verify policy was called + assertTrue(profileKeysCallCount.get() > 0, "profileKeys() should have been called"); + assertTrue(processCallCount.get() 
> 0, "process() should have been called"); + + // Verify we received a client profile + assertFalse(receivedProfiles.isEmpty(), "Should have received client profile"); + + // Verify we received configs + assertFalse(receivedConfigs.isEmpty(), "Should have received client configs"); + + // Verify configs were collected (should have client.id but not bootstrap.servers) + Map configs = receivedConfigs.values().iterator().next(); + assertTrue(configs.containsKey(ProducerConfig.CLIENT_ID_CONFIG), + "Should contain client.id config"); + assertFalse(configs.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG), + "Should NOT contain bootstrap.servers (sensitive)"); + } + } + + @ClusterTest + public void testConsumerConfigPushHandshake() throws Exception { + Map consumerConfig = new java.util.HashMap<>(); + consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "test-consumer-config-push"); + consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + consumerConfig.put(CommonClientConfigs.ENABLE_CONFIGS_PUSH_CONFIG, true); + + try (Consumer consumer = new KafkaConsumer<>(consumerConfig)) { + // Give time for handshake to complete + Thread.sleep(2000); + + // Verify policy was called + assertTrue(profileKeysCallCount.get() > 0, "profileKeys() should have been called"); + assertTrue(processCallCount.get() > 0, "process() should have been called"); + + // Verify we received configs + assertFalse(receivedConfigs.isEmpty(), "Should have received client configs"); + + Map configs = receivedConfigs.values().iterator().next(); + assertTrue(configs.containsKey(ConsumerConfig.GROUP_ID_CONFIG), + "Should contain group.id config"); + 
assertTrue(configs.containsKey(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), + "Should contain auto.offset.reset config"); + } + } + + @ClusterTest + public void testConfigPushWithEmptyProfile() throws Exception { + // Configure policy to return empty profile (no config keys requested) + returnEmptyProfile = true; + + Map producerConfig = new java.util.HashMap<>(); + producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfig.put(CommonClientConfigs.ENABLE_CONFIGS_PUSH_CONFIG, true); + + try (Producer producer = new KafkaProducer<>(producerConfig)) { + // Give time for handshake to complete + Thread.sleep(2000); + + // profileKeys() should be called + assertTrue(profileKeysCallCount.get() > 0, "profileKeys() should have been called"); + + // But process() should NOT be called since no configs were requested + assertEquals(0, processCallCount.get(), "process() should NOT have been called with empty profile"); + } + } + + @ClusterTest + public void testConfigPushDisabled() throws Exception { + Map producerConfig = new java.util.HashMap<>(); + producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + // Disable config push + producerConfig.put(CommonClientConfigs.ENABLE_CONFIGS_PUSH_CONFIG, false); + + try (Producer producer = new KafkaProducer<>(producerConfig)) { + // Give time to ensure no handshake occurs + Thread.sleep(2000); + + // Policy should NOT be called when disabled + assertEquals(0, profileKeysCallCount.get(), "profileKeys() should NOT be called when disabled"); + 
assertEquals(0, processCallCount.get(), "process() should NOT be called when disabled"); + } + } + + @ClusterTest + public void testClientProfileContainsMetadata() throws Exception { + Map producerConfig = new java.util.HashMap<>(); + producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfig.put(ProducerConfig.CLIENT_ID_CONFIG, "metadata-test-producer"); + producerConfig.put(CommonClientConfigs.ENABLE_CONFIGS_PUSH_CONFIG, true); + + try (Producer producer = new KafkaProducer<>(producerConfig)) { + Thread.sleep(2000); + + assertFalse(receivedProfiles.isEmpty(), "Should have received client profile"); + ClientProfile profile = receivedProfiles.values().iterator().next(); + + assertNotNull(profile.clientInstanceId(), "Client instance ID should not be null"); + assertNotNull(profile.clientSoftwareName(), "Client software name should not be null"); + assertNotNull(profile.clientSoftwareVersion(), "Client software version should not be null"); + assertNotNull(profile.clientMetadata(), "Client metadata should not be null"); + + // Verify software name is populated (e.g., "apache-kafka-java") + assertFalse(profile.clientSoftwareName().isEmpty(), + "Client software name should not be empty"); + } + } + + @ClusterTest + public void testCrcBasedProfileChangeDetection() throws Exception { + Map producerConfig = new java.util.HashMap<>(); + producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfig.put(CommonClientConfigs.ENABLE_CONFIGS_PUSH_CONFIG, true); + + try 
(Producer producer = new KafkaProducer<>(producerConfig)) { + Thread.sleep(2000); + + // Verify CRC was computed and is non-zero + assertTrue(profileKeysCallCount.get() > 0, "profileKeys() should have been called"); + + // The CRC should be deterministic based on the keys + // (We can't easily verify the exact value here, but we verified it's used) + } + } + + /** + * Test implementation of ClientConfigPolicy for integration tests. + *

+ * This policy: + * - Returns a standard set of config keys for all clients + * - Captures received profiles and configs for verification + * - Supports test scenarios via static flags + */ + public static class TestClientConfigPolicy implements ClientConfigPolicy { + + private final Map receivedProfiles = new ConcurrentHashMap<>(); + private final Map> receivedConfigs = new ConcurrentHashMap<>(); + private final AtomicInteger profileKeysCallCount = new AtomicInteger(0); + private final AtomicInteger processCallCount = new AtomicInteger(0); + + private final SortedSet standardConfigKeys = new TreeSet<>(Set.of( + "client.id", + "request.timeout.ms", + "retry.backoff.ms", + "metadata.max.age.ms", + "send.buffer.bytes", + "receive.buffer.bytes", + "reconnect.backoff.ms", + "reconnect.backoff.max.ms" + )); + + @Override + public void configure(Map configs) { + // No configuration needed for test policy + } + + @Override + public Set reconfigurableConfigs() { + return Collections.emptySet(); + } + + @Override + public void validateReconfiguration(Map configs) { + // No reconfiguration validation needed + } + + @Override + public void reconfigure(Map configs) { + // No reconfiguration needed + } + + @Override + public Optional profileKeys(ClientProfile clientProfile) { + profileKeysCallCount.incrementAndGet(); + + // Store the profile for verification + receivedProfiles.put(clientProfile.clientInstanceId(), clientProfile); + + if (throwUnknownProfileOnKeys) { + throw new UnknownConfigProfileException("Test: Unknown profile"); + } + + if (returnEmptyProfile) { + return Optional.empty(); + } + + // Return standard config keys + long crc = configurationProfileCrc(standardConfigKeys); + return Optional.of(new ClientConfigProfileKeys(standardConfigKeys, crc)); + } + + @Override + public void process(ClientPushConfigData pushConfigData) { + processCallCount.incrementAndGet(); + + // Store configs for verification + receivedConfigs.put( + 
pushConfigData.clientProfile().clientInstanceId(), + pushConfigData.configs() + ); + + if (rejectNextPush) { + throw new ClientConfigPolicyException("Test: Config validation failed"); + } + + // Validate that we received some configs + if (pushConfigData.configs().isEmpty()) { + throw new ClientConfigPolicyException("No configs received"); + } + } + + @Override + public void close() throws Exception { + // Cleanup if needed + } + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java index 5635f6e36b756..a41450e09f098 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java @@ -59,6 +59,8 @@ public class ClientsTestUtils { + + private static final String KEY_PREFIX = "key "; private static final String VALUE_PREFIX = "value "; diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientConfigsSender.java b/clients/src/main/java/org/apache/kafka/clients/ClientConfigsSender.java index 049d7bb2c495d..8298d49f38bdb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientConfigsSender.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientConfigsSender.java @@ -19,7 +19,7 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.requests.AbstractRequest; -import org.apache.kafka.common.requests.GetConfigSubscriptionResponse; +import org.apache.kafka.common.requests.GetConfigProfileKeysResponse; import org.apache.kafka.common.requests.PushConfigResponse; import java.util.Optional; @@ -32,7 +32,7 @@ *

* The handshake consists of two steps: *

    - *
  1. GetConfigSubscription - Broker tells client what configs it wants
  2. + *
  3. GetConfigProfileKeys - Broker tells client what configs it wants
  4. *
  5. PushConfig - Client sends the requested configs
  6. *
*/ @@ -50,7 +50,7 @@ public interface ClientConfigsSender extends AutoCloseable { /** * Creates the next request in the handshake flow based on current state. *

- * Returns GetConfigSubscriptionRequest if subscription is needed, or + * Returns GetConfigProfileKeysRequest if profile keys are needed, or * PushConfigRequest if ready to push configs. * * @return Optional containing the next request builder, or empty if no request needed @@ -58,19 +58,19 @@ public interface ClientConfigsSender extends AutoCloseable { Optional> createRequest(); /** - * Handle successful GetConfigSubscription response. + * Handle successful GetConfigProfileKeys response. *

- * This extracts the subscription details (client instance ID, requested keys, max bytes) + * This extracts the profile keys (configuration profile CRC, requested keys, max bytes) * and prepares for the PushConfig step. * - * @param response the subscription response from broker + * @param response the profile keys response from broker */ - void handleResponse(GetConfigSubscriptionResponse response); + void handleResponse(GetConfigProfileKeysResponse response); /** * Handle successful PushConfig response. *

- * This completes the handshake or handles errors like UNKNOWN_CONFIG_SUBSCRIPTION_ID. + * This completes the handshake or handles errors like UNKNOWN_CONFIG_PROFILE. * * @param response the push config response from broker */ @@ -98,11 +98,12 @@ public interface ClientConfigsSender extends AutoCloseable { void handleDisconnect(); /** - * Returns the client instance ID assigned by the broker. + * Returns the client instance ID. *

- * This is initially ZERO_UUID and gets assigned during the GetConfigSubscription response. + * This is a UUID v4 generated by the client before any network activity and is + * shared across KIP-714 (telemetry) and KIP-CFG (config push). * - * @return the client instance ID, or ZERO_UUID if not yet assigned + * @return the client instance ID */ Uuid clientInstanceId(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index 49d9a0555b6e2..281dbbb3c1135 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -160,8 +160,7 @@ public static NetworkClient createNetworkClient(AbstractConfig config, Metadata metadata, Sensor throttleTimeSensor, ClientTelemetrySender clientTelemetrySender, - ClientConfigsSender clientConfigsSender, - String clientSoftwareRole) { + ClientConfigsSender clientConfigsSender) { return createNetworkClient(config, config.getString(CommonClientConfigs.CLIENT_ID_CONFIG), metrics, @@ -176,8 +175,7 @@ public static NetworkClient createNetworkClient(AbstractConfig config, new DefaultHostResolver(), throttleTimeSensor, clientTelemetrySender, - clientConfigsSender, - clientSoftwareRole); + clientConfigsSender); } public static NetworkClient createNetworkClient(AbstractConfig config, @@ -205,7 +203,6 @@ public static NetworkClient createNetworkClient(AbstractConfig config, hostResolver, null, null, - null, null); } @@ -223,8 +220,7 @@ public static NetworkClient createNetworkClient(AbstractConfig config, HostResolver hostResolver, Sensor throttleTimeSensor, ClientTelemetrySender clientTelemetrySender, - ClientConfigsSender clientConfigsSender, - String clientSoftwareRole) { + ClientConfigsSender clientConfigsSender) { ChannelBuilder channelBuilder = null; Selector selector = null; @@ -256,7 +252,6 @@ public static NetworkClient createNetworkClient(AbstractConfig 
config, hostResolver, clientTelemetrySender, clientConfigsSender, - clientSoftwareRole, config.getLong(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG), MetadataRecoveryStrategy.forName(config.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)) ); diff --git a/clients/src/main/java/org/apache/kafka/clients/ConfigCollector.java b/clients/src/main/java/org/apache/kafka/clients/ConfigCollector.java index 88f30eee1f307..8093e9c9a223d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ConfigCollector.java +++ b/clients/src/main/java/org/apache/kafka/clients/ConfigCollector.java @@ -45,12 +45,12 @@ public class ConfigCollector { * @param maxBytes Maximum payload size in bytes * @return List of config entries ready for PushConfigRequest */ - public static List collectConfigs( + public static List collectConfigs( AbstractConfig config, List requestedKeys, int maxBytes) { - List result = new ArrayList<>(); + List result = new ArrayList<>(); // Expand wildcard "*" to all keys Set keysToInclude = expandKeys(config, requestedKeys); @@ -72,8 +72,8 @@ public static List collectConfigs( continue; // Unknown config } - PushConfigRequestData.ClientConfig entry = - convertToClientConfig(key, value, type); + PushConfigRequestData.Config entry = + convertToConfig(key, value, type); // Check size limit int entrySize = estimateSize(entry); @@ -154,22 +154,23 @@ private static boolean shouldExclude(String key, AbstractConfig config) { /** * Convert a config entry to the protocol format. 
*/ - private static PushConfigRequestData.ClientConfig convertToClientConfig( + private static PushConfigRequestData.Config convertToConfig( String key, Object value, ConfigDef.Type type) { - PushConfigRequestData.ClientConfig config = new PushConfigRequestData.ClientConfig(); - config.setName(key); - config.setValue(String.valueOf(value)); - config.setType(mapConfigType(type)); + PushConfigRequestData.Config config = new PushConfigRequestData.Config(); + config.setConfigKey(key); + config.setConfigValue(String.valueOf(value)); + config.setConfigType(mapConfigType(type)); return config; } /** - * Convert ConfigDef.Type to protocol byte value. + * Convert ConfigDef.Type to protocol short value (int16). + * Maps to ConfigDef.Type ordinal values. CLASS and PASSWORD types are excluded by filtering. */ - private static byte mapConfigType(ConfigDef.Type type) { + private static short mapConfigType(ConfigDef.Type type) { switch (type) { case BOOLEAN: return 0; @@ -186,7 +187,7 @@ private static byte mapConfigType(ConfigDef.Type type) { case LIST: return 6; case CLASS: - return 7; + return 7; // Should never reach here due to filtering case PASSWORD: return 8; // Should never reach here due to filtering default: @@ -198,8 +199,8 @@ private static byte mapConfigType(ConfigDef.Type type) { * Estimate the size of a config entry in bytes. * This is a rough estimate for checking against maxBytes limit. 
*/ - private static int estimateSize(PushConfigRequestData.ClientConfig config) { + private static int estimateSize(PushConfigRequestData.Config config) { // Rough estimate: key length + value length + overhead for type and framing - return config.name().length() + config.value().length() + 10; + return config.configKey().length() + config.configValue().length() + 10; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java b/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java index 54f52ab582a48..9b7f2711f78ba 100644 --- a/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java +++ b/clients/src/main/java/org/apache/kafka/clients/DefaultClientConfigsSender.java @@ -19,14 +19,13 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.config.AbstractConfig; -import org.apache.kafka.common.message.GetConfigSubscriptionRequestData; -import org.apache.kafka.common.message.GetConfigSubscriptionResponseData; +import org.apache.kafka.common.message.GetConfigProfileKeysRequestData; +import org.apache.kafka.common.message.GetConfigProfileKeysResponseData; import org.apache.kafka.common.message.PushConfigRequestData; -import org.apache.kafka.common.message.PushConfigResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AbstractRequest; -import org.apache.kafka.common.requests.GetConfigSubscriptionRequest; -import org.apache.kafka.common.requests.GetConfigSubscriptionResponse; +import org.apache.kafka.common.requests.GetConfigProfileKeysRequest; +import org.apache.kafka.common.requests.GetConfigProfileKeysResponse; import org.apache.kafka.common.requests.PushConfigRequest; import org.apache.kafka.common.requests.PushConfigResponse; @@ -36,14 +35,13 @@ import java.util.ArrayList; import java.util.List; import java.util.Optional; -import java.util.stream.Collectors; /** * Default 
implementation of ClientConfigsSender that manages the config push handshake. *

* This implementation follows a simple state machine: *

- *   NOT_STARTED → SUBSCRIPTION_IN_PROGRESS → PUSH_IN_PROGRESS → COMPLETED/FAILED
+ *   NOT_STARTED → PROFILE_KEYS_IN_PROGRESS → PUSH_IN_PROGRESS → COMPLETED/FAILED
  * 
*/ public class DefaultClientConfigsSender implements ClientConfigsSender { @@ -56,8 +54,8 @@ public void close() throws Exception { } private enum State { - NOT_STARTED, // Initial state, need to send GetConfigSubscription - SUBSCRIPTION_IN_PROGRESS, // Waiting for GetConfigSubscription response + NOT_STARTED, // Initial state, need to send GetConfigProfileKeys + PROFILE_KEYS_IN_PROGRESS, // Waiting for GetConfigProfileKeys response PUSH_IN_PROGRESS, // Waiting for PushConfig response COMPLETED, // Successfully pushed config FAILED // Failed (but client continues) @@ -66,7 +64,7 @@ private enum State { private final AbstractConfig clientConfig; private volatile Uuid clientInstanceId = Uuid.ZERO_UUID; private volatile State state = State.NOT_STARTED; - private volatile int configSubscriptionId = -1; + private volatile long configurationProfileCrc = -1L; private volatile int configMaxBytes = 0; private volatile List requestedConfigKeys = new ArrayList<>(); @@ -83,12 +81,12 @@ public boolean shouldAttemptHandshake() { public synchronized Optional> createRequest() { switch (state) { case NOT_STARTED: - log.debug("Creating GetConfigSubscription request"); - state = State.SUBSCRIPTION_IN_PROGRESS; - return Optional.of(createGetConfigSubscriptionRequest()); + log.debug("Creating GetConfigProfileKeys request"); + state = State.PROFILE_KEYS_IN_PROGRESS; + return Optional.of(createGetConfigProfileKeysRequest()); - case SUBSCRIPTION_IN_PROGRESS: - // Waiting for subscription response, no new request to send + case PROFILE_KEYS_IN_PROGRESS: + // Waiting for profile keys response, no new request to send return Optional.empty(); case PUSH_IN_PROGRESS: @@ -110,40 +108,31 @@ public synchronized Optional> createRequest() { } @Override - public synchronized void handleResponse(GetConfigSubscriptionResponse response) { - if (state != State.SUBSCRIPTION_IN_PROGRESS) { - log.warn("Received GetConfigSubscription response in unexpected state: {}", state); + public synchronized void 
handleResponse(GetConfigProfileKeysResponse response) { + if (state != State.PROFILE_KEYS_IN_PROGRESS) { + log.warn("Received GetConfigProfileKeys response in unexpected state: {}", state); return; } Errors error = response.error(); if (error != Errors.NONE) { - log.warn("GetConfigSubscription request failed with error: {}", error); + log.warn("GetConfigProfileKeys request failed with error: {} - {}", + error, response.data().errorMessage()); state = State.FAILED; return; } - GetConfigSubscriptionResponseData data = response.data(); + GetConfigProfileKeysResponseData data = response.data(); - // Store client instance ID if this was the first request - Uuid receivedInstanceId = data.clientInstanceId(); - if (!receivedInstanceId.equals(Uuid.ZERO_UUID)) { - clientInstanceId = receivedInstanceId; - log.debug("Received client instance ID: {}", clientInstanceId); - } - - // Store subscription details - configSubscriptionId = data.subscriptionId(); + // Store configuration profile CRC + configurationProfileCrc = data.configurationProfileCrc(); configMaxBytes = data.configMaxBytes(); - // Extract requested keys - requestedConfigKeys = data.configNames() - .stream() - .map(key -> key.name()) - .collect(Collectors.toList()); + // Extract requested keys (now simple string array, not nested structure) + requestedConfigKeys = new ArrayList<>(data.configKeys()); - log.debug("Config subscription received: subscriptionId={}, maxBytes={}, keys={}", - configSubscriptionId, configMaxBytes, requestedConfigKeys.size()); + log.debug("Config profile received: crc={}, maxBytes={}, keys={}", + configurationProfileCrc, configMaxBytes, requestedConfigKeys.size()); // Transition to next state - PushConfig will be created on next createRequest() call state = State.PUSH_IN_PROGRESS; @@ -163,33 +152,32 @@ public synchronized void handleResponse(PushConfigResponse response) { state = State.COMPLETED; } else if (error == Errors.INVALID_CONFIG) { - // Log per-config errors from the new ConfigErrors 
array - if (response.hasConfigErrors()) { - log.error("Configuration push failed with {} invalid config(s):", - response.configErrors().size()); - for (PushConfigResponseData.ConfigError configError : response.configErrors()) { - log.error(" Config '{}': {}", - configError.configKey(), - configError.configErrorDescription()); - } + // Log error message from the response + String errorMessage = response.data().errorMessage(); + if (errorMessage != null && !errorMessage.isEmpty()) { + log.error("Configuration push failed: INVALID_CONFIG - {}", errorMessage); } else { log.error("Configuration push failed: INVALID_CONFIG (no details provided)"); } state = State.FAILED; - } else if (error == Errors.UNKNOWN_CONFIG_SUBSCRIPTION_ID) { - log.warn("Subscription changed, retrying GetConfigSubscription"); + } else if (error == Errors.UNKNOWN_CONFIG_PROFILE) { + log.warn("Configuration profile changed, retrying GetConfigProfileKeys"); // Reset to retry once state = State.NOT_STARTED; - configSubscriptionId = -1; + configurationProfileCrc = -1L; requestedConfigKeys.clear(); } else if (error == Errors.CONFIG_TOO_LARGE) { - log.error("Config payload too large, cannot retry"); + String errorMessage = response.data().errorMessage(); + log.error("Config payload too large, cannot retry: {}", + errorMessage != null ? errorMessage : ""); state = State.FAILED; } else { - log.warn("PushConfig failed with error: {}", error); + String errorMessage = response.data().errorMessage(); + log.warn("PushConfig failed with error: {} - {}", + error, errorMessage != null ? 
errorMessage : ""); state = State.FAILED; } } @@ -206,7 +194,7 @@ public void handleFailedPushConfigsRequest(KafkaException kafkaException) { @Override public synchronized void handleDisconnect() { - if (state == State.SUBSCRIPTION_IN_PROGRESS || state == State.PUSH_IN_PROGRESS) { + if (state == State.PROFILE_KEYS_IN_PROGRESS || state == State.PUSH_IN_PROGRESS) { log.debug("Disconnected during config push handshake"); state = State.FAILED; } @@ -217,22 +205,22 @@ public Uuid clientInstanceId() { return clientInstanceId; } - private GetConfigSubscriptionRequest.Builder createGetConfigSubscriptionRequest() { - GetConfigSubscriptionRequestData requestData = new GetConfigSubscriptionRequestData() - .setClientInstanceId(clientInstanceId); // ZERO_UUID on first call + private GetConfigProfileKeysRequest.Builder createGetConfigProfileKeysRequest() { + // No fields in GetConfigProfileKeysRequest - client profile comes from ApiVersionsRequest context + GetConfigProfileKeysRequestData requestData = new GetConfigProfileKeysRequestData(); - return new GetConfigSubscriptionRequest.Builder(requestData); + return new GetConfigProfileKeysRequest.Builder(requestData); } /** * Creates a PushConfig request with collected configuration. - * This should only be called after receiving a successful GetConfigSubscription response. + * This should only be called after receiving a successful GetConfigProfileKeys response. 
*/ private PushConfigRequest.Builder createPushConfigRequest() { log.debug("Collecting and preparing config push"); -// // Collect configs using ConfigCollector - List configs; + // Collect configs using ConfigCollector + List configs; try { configs = ConfigCollector.collectConfigs( clientConfig, @@ -247,8 +235,7 @@ private PushConfigRequest.Builder createPushConfigRequest() { // Build request PushConfigRequestData requestData = new PushConfigRequestData() - .setClientInstanceId(clientInstanceId) - .setSubscriptionId(configSubscriptionId) + .setConfigurationProfileCrc(configurationProfileCrc) .setConfigs(configs); return new PushConfigRequest.Builder(requestData); @@ -256,11 +243,11 @@ private PushConfigRequest.Builder createPushConfigRequest() { /** * Checks if we need to send a PushConfig request. - * This is true when we've received a subscription but haven't pushed yet. + * This is true when we've received profile keys but haven't pushed yet. */ synchronized boolean needsPushRequest() { return state == State.PUSH_IN_PROGRESS && - configSubscriptionId != -1 && + configurationProfileCrc != -1L && !requestedConfigKeys.isEmpty(); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index 0b9a00bc7e8f3..9d056503f51a9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -38,7 +38,7 @@ import org.apache.kafka.common.requests.ApiVersionsRequest; import org.apache.kafka.common.requests.ApiVersionsResponse; import org.apache.kafka.common.requests.CorrelationIdMismatchException; -import org.apache.kafka.common.requests.GetConfigSubscriptionResponse; +import org.apache.kafka.common.requests.GetConfigProfileKeysResponse; import org.apache.kafka.common.requests.GetTelemetrySubscriptionsResponse; import org.apache.kafka.common.requests.MetadataRequest; import 
org.apache.kafka.common.requests.MetadataResponse; @@ -108,9 +108,6 @@ private enum State { /* the client id used to identify this client in requests to the server */ private final String clientId; - /* the client software role used in ApiVersionsRequest */ - private final String clientSoftwareRole; - /* the current correlation id to use when sending requests to servers */ private int correlation; @@ -181,7 +178,6 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, null, - null, Long.MAX_VALUE, metadataRecoveryStrategy); } @@ -223,7 +219,6 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, null, - null, rebootstrapTriggerMs, metadataRecoveryStrategy); } @@ -265,7 +260,6 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, null, - null, Long.MAX_VALUE, metadataRecoveryStrategy); } @@ -306,7 +300,6 @@ public NetworkClient(Selectable selector, new DefaultHostResolver(), null, null, - null, Long.MAX_VALUE, metadataRecoveryStrategy); } @@ -331,7 +324,6 @@ public NetworkClient(MetadataUpdater metadataUpdater, HostResolver hostResolver, ClientTelemetrySender clientTelemetrySender, ClientConfigsSender clientConfigsSender, - String clientSoftwareRole, long rebootstrapTriggerMs, MetadataRecoveryStrategy metadataRecoveryStrategy) { /* It would be better if we could pass `DefaultMetadataUpdater` from the public constructor, but it's not @@ -347,7 +339,6 @@ public NetworkClient(MetadataUpdater metadataUpdater, } this.selector = selector; this.clientId = clientId; - this.clientSoftwareRole = clientSoftwareRole; this.inFlightRequests = new InFlightRequests(maxInFlightRequestsPerConnection); this.connectionStates = new ClusterConnectionStates( reconnectBackoffMs, reconnectBackoffMax, @@ -1041,8 +1032,8 @@ else if (req.isInternalRequest && response instanceof GetTelemetrySubscriptionsR telemetrySender.handleResponse((GetTelemetrySubscriptionsResponse) response); else if (req.isInternalRequest && response 
instanceof PushTelemetryResponse) telemetrySender.handleResponse((PushTelemetryResponse) response); - else if (req.isInternalRequest && response instanceof GetConfigSubscriptionResponse) - configsSender.handleResponse((GetConfigSubscriptionResponse) response); + else if (req.isInternalRequest && response instanceof GetConfigProfileKeysResponse) + configsSender.handleResponse((GetConfigProfileKeysResponse) response); else if (req.isInternalRequest && response instanceof PushConfigResponse) configsSender.handleResponse((PushConfigResponse) response); else @@ -1070,7 +1061,7 @@ private void handleApiVersionsResponse(List responses, maxApiVersion = apiVersion.maxVersion(); } } - nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder(maxApiVersion).withRole(this.clientSoftwareRole)); + nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder(maxApiVersion)); } return; } @@ -1116,7 +1107,7 @@ private void handleConnections() { // Therefore, it is still necessary to check isChannelReady before attempting to send on this // connection. if (discoverBrokerVersions) { - nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder().withRole(this.clientSoftwareRole)); + nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder()); log.debug("Completed connection to node {}. 
Fetching API versions.", node); } else { this.connectionStates.ready(node); @@ -1203,7 +1194,7 @@ private boolean isTelemetryApi(ApiKeys apiKey) { } private boolean isConfigPushApi(ApiKeys apiKey) { - return apiKey == ApiKeys.GET_CONFIG_SUBSCRIPTION || apiKey == ApiKeys.PUSH_CONFIG; + return apiKey == ApiKeys.GET_CONFIG_PROFILE_KEYS || apiKey == ApiKeys.PUSH_CONFIG; } class DefaultMetadataUpdater implements MetadataUpdater { @@ -1511,7 +1502,7 @@ public ConfigsSender(ClientConfigsSender clientConfigsSender) { this.clientConfigsSender = clientConfigsSender; } - public void handleResponse(GetConfigSubscriptionResponse response) { + public void handleResponse(GetConfigProfileKeysResponse response) { clientConfigsSender.handleResponse(response); } @@ -1520,9 +1511,9 @@ public void handleResponse(PushConfigResponse response) { } public void handleFailedRequest(ApiKeys apiKey, KafkaException maybeFatalException) { - if (apiKey == ApiKeys.GET_CONFIG_SUBSCRIPTION) + if (apiKey == ApiKeys.GET_CONFIG_PROFILE_KEYS) clientConfigsSender.handleFailedGetConfigsSubscriptionRequest(maybeFatalException); - else if (apiKey == ApiKeys.PUSH_TELEMETRY) + else if (apiKey == ApiKeys.PUSH_CONFIG) clientConfigsSender.handleFailedPushConfigsRequest(maybeFatalException); else throw new IllegalStateException("Invalid api key for failed configs request"); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java index 3a45df086638d..5f8b57035d1c5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java @@ -564,8 +564,7 @@ static KafkaAdminClient createInternal( (hostResolver == null) ? 
new DefaultHostResolver() : hostResolver, null, clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), - null, - "admin"); + null); return new KafkaAdminClient(config, clientId, time, metadataManager, metrics, networkClient, timeoutProcessorFactory, logContext, clientTelemetryReporter); } catch (Throwable exc) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java index ec7a93b70c1cc..f6d2b8c88003d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java @@ -100,8 +100,7 @@ public static ConsumerNetworkClient createConsumerNetworkClient(ConsumerConfig c metadata, throttleTimeSensor, clientTelemetrySender, - clientConfigsSender, - "consumer"); + clientConfigsSender); // Will avoid blocking an extended period of time to prevent heartbeat thread starvation int heartbeatIntervalMs = config.getInt(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java index c85db5561479a..42d11e52c0df3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java @@ -487,8 +487,7 @@ protected NetworkClientDelegate create() { metadata, throttleTimeSensor, clientTelemetrySender, - clientConfigsSender, - "consumer"); + clientConfigsSender); return new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue, asyncConsumerMetrics); } }; diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index 9a9d33fab98cb..a1a30b34847e0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -537,8 +537,7 @@ Sender newSender(LogContext logContext, KafkaClient kafkaClient, ProducerMetadat metadata, throttleTimeSensor, clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), - null, - "producer"); + null); short acks = Short.parseShort(producerConfig.getString(ProducerConfig.ACKS_CONFIG)); return new Sender(logContext, diff --git a/clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigSubscriptionIdException.java b/clients/src/main/java/org/apache/kafka/common/errors/ClientConfigPolicyException.java similarity index 64% rename from clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigSubscriptionIdException.java rename to clients/src/main/java/org/apache/kafka/common/errors/ClientConfigPolicyException.java index 23025a548d04b..40b42b5244a45 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigSubscriptionIdException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/ClientConfigPolicyException.java @@ -17,19 +17,20 @@ package org.apache.kafka.common.errors; /** - * Exception thrown when a client sends a configuration push request with an unknown or expired subscription ID. - * This typically happens when the broker's configuration subscription has changed between the time the client - * received the subscription and when it attempted to push its configuration. + * Exception thrown when a client configuration policy violation occurs. + *
<p>
+ * This is a generic exception for ClientConfigPolicy implementations to throw when + * client configurations fail validation or enforcement rules. */ -public class UnknownConfigSubscriptionIdException extends RetriableException { +public class ClientConfigPolicyException extends ApiException { private static final long serialVersionUID = 1L; - public UnknownConfigSubscriptionIdException(String message) { + public ClientConfigPolicyException(String message) { super(message); } - public UnknownConfigSubscriptionIdException(String message, Throwable cause) { + public ClientConfigPolicyException(String message, Throwable cause) { super(message, cause); } } diff --git a/clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigProfileException.java b/clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigProfileException.java new file mode 100644 index 0000000000000..bbdc6e667ac86 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/UnknownConfigProfileException.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.errors; + +/** + * Exception thrown when a client's configuration profile is unknown or not supported by the broker. + *
<p>
+ * This can occur in two scenarios:
+ * <ul>
+ *   <li>During GetConfigProfileKeys: The client profile did not match any configuration profiles
+ * and the policy implementation does not allow for that case. This is a fatal error.</li>
+ *   <li>During PushConfig: The client sent a request with an invalid or outdated configuration profile CRC,
+ * which means the configuration profile has changed. The client should retry the handshake.</li>
+ * </ul>
+ */ +public class UnknownConfigProfileException extends RetriableException { + + private static final long serialVersionUID = 1L; + + public UnknownConfigProfileException(String message) { + super(message); + } + + public UnknownConfigProfileException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index a42c64196c00e..50e0b2e409a60 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -138,7 +138,7 @@ public enum ApiKeys { DESCRIBE_SHARE_GROUP_OFFSETS(ApiMessageType.DESCRIBE_SHARE_GROUP_OFFSETS), ALTER_SHARE_GROUP_OFFSETS(ApiMessageType.ALTER_SHARE_GROUP_OFFSETS), DELETE_SHARE_GROUP_OFFSETS(ApiMessageType.DELETE_SHARE_GROUP_OFFSETS), - GET_CONFIG_SUBSCRIPTION(ApiMessageType.GET_CONFIG_SUBSCRIPTION), + GET_CONFIG_PROFILE_KEYS(ApiMessageType.GET_CONFIG_PROFILE_KEYS), PUSH_CONFIG(ApiMessageType.PUSH_CONFIG); private static final Map> APIS_BY_LISTENER = diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index 21a7119e35882..5232c05810619 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -135,7 +135,7 @@ import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; import org.apache.kafka.common.errors.TransactionalIdNotFoundException; import org.apache.kafka.common.errors.UnacceptableCredentialException; -import org.apache.kafka.common.errors.UnknownConfigSubscriptionIdException; +import org.apache.kafka.common.errors.UnknownConfigProfileException; import org.apache.kafka.common.errors.UnknownControllerIdException; import org.apache.kafka.common.errors.UnknownLeaderEpochException; 
import org.apache.kafka.common.errors.UnknownMemberIdException; @@ -422,7 +422,7 @@ public enum Errors { STREAMS_TOPOLOGY_FENCED(132, "The supplied topology epoch is outdated.", StreamsTopologyFencedException::new), SHARE_SESSION_LIMIT_REACHED(133, "The limit of share sessions has been reached.", ShareSessionLimitReachedException::new), CONFIG_TOO_LARGE(134, "Configuration payload exceeds broker's ConfigMaxBytes limit.", ConfigTooLargeException::new), - UNKNOWN_CONFIG_SUBSCRIPTION_ID(135, "Unknown or expired config subscription ID.", UnknownConfigSubscriptionIdException::new); + UNKNOWN_CONFIG_PROFILE(135, "Client configuration profile is unknown or not supported.", UnknownConfigProfileException::new); private static final Logger log = LoggerFactory.getLogger(Errors.class); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java index e5e2b5cdbfe8e..7a89110e60606 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java @@ -291,8 +291,8 @@ public static AbstractResponse parseResponse(ApiKeys apiKey, Readable readable, return AlterShareGroupOffsetsResponse.parse(readable, version); case DELETE_SHARE_GROUP_OFFSETS: return DeleteShareGroupOffsetsResponse.parse(readable, version); - case GET_CONFIG_SUBSCRIPTION: - return GetConfigSubscriptionResponse.parse(readable, version); + case GET_CONFIG_PROFILE_KEYS: + return GetConfigProfileKeysResponse.parse(readable, version); case PUSH_CONFIG: return PushConfigResponse.parse(readable, version); default: diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java index 6c17cc9fd33ba..4b9c4915997d6 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java +++ 
b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java @@ -33,8 +33,7 @@ public static class Builder extends AbstractRequest.Builder private static final ApiVersionsRequestData DEFAULT_DATA = new ApiVersionsRequestData() .setClientSoftwareName(DEFAULT_CLIENT_SOFTWARE_NAME) - .setClientSoftwareVersion(AppInfoParser.getVersion()) - .setClientSoftwareRole(null); + .setClientSoftwareVersion(AppInfoParser.getVersion()); private final ApiVersionsRequestData data; @@ -57,11 +56,6 @@ public Builder( this.data = data.duplicate(); } - public Builder withRole(String role) { - this.data.setClientSoftwareRole(role); - return this; - } - @Override public ApiVersionsRequest build(short version) { return new ApiVersionsRequest(data, version); @@ -103,14 +97,6 @@ public boolean isValid() { if (version() >= 3) { boolean nameValid = SOFTWARE_NAME_VERSION_PATTERN.matcher(data.clientSoftwareName()).matches(); boolean versionValid = SOFTWARE_NAME_VERSION_PATTERN.matcher(data.clientSoftwareVersion()).matches(); - - if (version() >= 5) { - // For v5+, also validate role if present - String role = data.clientSoftwareRole(); - boolean roleValid = role == null || role.isEmpty() || - SOFTWARE_NAME_VERSION_PATTERN.matcher(role).matches(); - return nameValid && versionValid && roleValid; - } return nameValid && versionValid; } else { return true; diff --git a/clients/src/main/java/org/apache/kafka/common/requests/GetConfigProfileKeysRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigProfileKeysRequest.java new file mode 100644 index 0000000000000..3d3ae6dd19aa4 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigProfileKeysRequest.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.GetConfigProfileKeysRequestData; +import org.apache.kafka.common.message.GetConfigProfileKeysResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; + +public class GetConfigProfileKeysRequest extends AbstractRequest { + + public static class Builder extends AbstractRequest.Builder { + private final GetConfigProfileKeysRequestData data; + + public Builder(GetConfigProfileKeysRequestData data) { + this(data, false); + } + + public Builder(GetConfigProfileKeysRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.GET_CONFIG_PROFILE_KEYS, enableUnstableLastVersion); + this.data = data; + } + + @Override + public GetConfigProfileKeysRequest build(short version) { + return new GetConfigProfileKeysRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final GetConfigProfileKeysRequestData data; + + public GetConfigProfileKeysRequest(GetConfigProfileKeysRequestData data, short version) { + super(ApiKeys.GET_CONFIG_PROFILE_KEYS, version); + this.data = data; + } + + @Override + public GetConfigProfileKeysResponse getErrorResponse(int throttleTimeMs, Throwable e) { + GetConfigProfileKeysResponseData responseData = new 
GetConfigProfileKeysResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setErrorCode(Errors.forException(e).code()) + .setErrorMessage(e.getMessage()); + return new GetConfigProfileKeysResponse(responseData); + } + + @Override + public GetConfigProfileKeysRequestData data() { + return data; + } + + public static GetConfigProfileKeysRequest parse(Readable readable, short version) { + return new GetConfigProfileKeysRequest(new GetConfigProfileKeysRequestData(readable, version), version); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigProfileKeysResponse.java similarity index 74% rename from clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionResponse.java rename to clients/src/main/java/org/apache/kafka/common/requests/GetConfigProfileKeysResponse.java index 44499bdd433bc..e08a0b964e925 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigProfileKeysResponse.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.requests; -import org.apache.kafka.common.message.GetConfigSubscriptionResponseData; +import org.apache.kafka.common.message.GetConfigProfileKeysResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.Readable; @@ -26,19 +26,20 @@ /** * Possible error codes: + * - {@link Errors#UNKNOWN_CONFIG_PROFILE} * - {@link Errors#UNSUPPORTED_VERSION} * - {@link Errors#INVALID_REQUEST} */ -public class GetConfigSubscriptionResponse extends AbstractResponse { - private final GetConfigSubscriptionResponseData data; +public class GetConfigProfileKeysResponse extends AbstractResponse { + private final GetConfigProfileKeysResponseData data; - public 
GetConfigSubscriptionResponse(GetConfigSubscriptionResponseData data) { - super(ApiKeys.GET_CONFIG_SUBSCRIPTION); + public GetConfigProfileKeysResponse(GetConfigProfileKeysResponseData data) { + super(ApiKeys.GET_CONFIG_PROFILE_KEYS); this.data = data; } @Override - public GetConfigSubscriptionResponseData data() { + public GetConfigProfileKeysResponseData data() { return data; } @@ -67,8 +68,7 @@ public Errors error() { return Errors.forCode(data.errorCode()); } - public static GetConfigSubscriptionResponse parse(Readable readable, short version) { - return new GetConfigSubscriptionResponse(new GetConfigSubscriptionResponseData( - readable, version)); + public static GetConfigProfileKeysResponse parse(Readable readable, short version) { + return new GetConfigProfileKeysResponse(new GetConfigProfileKeysResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java deleted file mode 100644 index bd032966a858b..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/GetConfigSubscriptionRequest.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.GetConfigSubscriptionRequestData; -import org.apache.kafka.common.message.GetConfigSubscriptionResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; - -public class GetConfigSubscriptionRequest extends AbstractRequest { - - public static class Builder extends AbstractRequest.Builder { - private final GetConfigSubscriptionRequestData data; - - public Builder(GetConfigSubscriptionRequestData data) { - this(data, false); - } - - public Builder(GetConfigSubscriptionRequestData data, boolean enableUnstableLastVersion) { - super(ApiKeys.GET_CONFIG_SUBSCRIPTION, enableUnstableLastVersion); - this.data = data; - } - - @Override - public GetConfigSubscriptionRequest build(short version) { - return new GetConfigSubscriptionRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - } - - private final GetConfigSubscriptionRequestData data; - - public GetConfigSubscriptionRequest(GetConfigSubscriptionRequestData data, short version) { - super(ApiKeys.GET_CONFIG_SUBSCRIPTION, version); - this.data = data; - } - - @Override - public GetConfigSubscriptionResponse getErrorResponse(int throttleTimeMs, Throwable e) { - GetConfigSubscriptionResponseData responseData = new GetConfigSubscriptionResponseData() - .setErrorCode(Errors.forException(e).code()) - .setThrottleTimeMs(throttleTimeMs) - .setClientInstanceId(Uuid.ZERO_UUID) - .setSubscriptionId(-1) - .setConfigMaxBytes(0); - return new GetConfigSubscriptionResponse(responseData); - } - - @Override - public GetConfigSubscriptionRequestData data() { - return data; - } - - public static GetConfigSubscriptionRequest parse(Readable readable, short 
version) { - return new GetConfigSubscriptionRequest(new GetConfigSubscriptionRequestData( - readable, version), version); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java index 9ae47c319a39c..b1b97f19f4d87 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/PushConfigResponse.java @@ -27,8 +27,9 @@ /** * Possible error codes: * - {@link Errors#CONFIG_TOO_LARGE} - * - {@link Errors#INVALID_CONFIG} - Check configErrors() for per-config details - * - {@link Errors#UNKNOWN_CONFIG_SUBSCRIPTION_ID} + * - {@link Errors#INVALID_CONFIG} + * - {@link Errors#CLIENT_CONFIG_POLICY_EXCEPTION} + * - {@link Errors#UNKNOWN_CONFIG_PROFILE} * - {@link Errors#UNSUPPORTED_VERSION} * - {@link Errors#INVALID_REQUEST} */ @@ -70,20 +71,6 @@ public Errors error() { return Errors.forCode(data.errorCode()); } - /** - * Returns true if there are per-config validation errors. - */ - public boolean hasConfigErrors() { - return !data.configErrors().isEmpty(); - } - - /** - * Returns the list of per-config errors, populated when ErrorCode is INVALID_CONFIG. 
- */ - public java.util.List configErrors() { - return data.configErrors(); - } - public static PushConfigResponse parse(Readable readable, short version) { return new PushConfigResponse(new PushConfigResponseData(readable, version)); } diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java index b48375c7d7c63..5b87f2c25632a 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java @@ -580,7 +580,7 @@ else if (!apiVersionsRequest.isValid()) else { metadataRegistry.registerClientInformation(new ClientInformation(apiVersionsRequest.data().clientSoftwareName(), apiVersionsRequest.data().clientSoftwareVersion(), - apiVersionsRequest.data().clientSoftwareRole())); + null)); sendKafkaResponse(context, apiVersionSupplier.apply(apiVersionsRequest.version())); setSaslState(SaslState.HANDSHAKE_REQUEST); } diff --git a/clients/src/main/java/org/apache/kafka/server/policy/ClientConfigPolicy.java b/clients/src/main/java/org/apache/kafka/server/policy/ClientConfigPolicy.java index 0e42a5e199ab5..2717d8c4aaa78 100644 --- a/clients/src/main/java/org/apache/kafka/server/policy/ClientConfigPolicy.java +++ b/clients/src/main/java/org/apache/kafka/server/policy/ClientConfigPolicy.java @@ -16,19 +16,27 @@ */ package org.apache.kafka.server.policy; -import org.apache.kafka.common.Configurable; +import org.apache.kafka.common.Reconfigurable; import org.apache.kafka.common.annotation.InterfaceStability; -import org.apache.kafka.common.errors.PolicyViolationException; -import org.apache.kafka.common.network.ClientInformation; +import org.apache.kafka.common.errors.ClientConfigPolicyException; +import org.apache.kafka.common.errors.ConfigTooLargeException; +import 
org.apache.kafka.common.errors.UnknownConfigProfileException; +import org.apache.kafka.common.utils.Crc32C; -import java.util.Map; -import java.util.Set; +import java.nio.charset.StandardCharsets; +import java.util.Optional; +import java.util.SortedSet; +import java.util.stream.Collectors; /** - * An interface for enforcing client configuration policies. + * An interface for intercepting and enforcing client configuration. * - *
<p>
- * Common use cases are verifying that client configurations match expected values
- * or fall within acceptable ranges for a given client profile (name, version, role).
+ * <p>
+ * Common use cases include:
+ * <ul>
+ *   <li>Selecting which configuration keys to request from clients based on their profile</li>
+ *   <li>Validating that client configurations match expected values or fall within acceptable ranges</li>
+ *   <li>Storing client configuration snapshots for observability and troubleshooting</li>
+ * </ul>
 *
 * <p>
If client.config.policy.class.name is defined, Kafka will create an instance of the specified class * using the default constructor and will then pass the broker configs to its configure() method. @@ -36,69 +44,66 @@ * necessary). */ @InterfaceStability.Evolving -public interface ClientConfigPolicy extends Configurable, AutoCloseable { +public interface ClientConfigPolicy extends Reconfigurable, AutoCloseable { /** - * Metadata provided for GetConfigSubscription requests. - */ - class GetConfigSubscriptionRequestMetadata { - private final ClientInformation clientInformation; - - public GetConfigSubscriptionRequestMetadata(ClientInformation clientInformation) { - this.clientInformation = clientInformation; - } - - /** - * Return the client information from the ApiVersionsRequest. - */ - public ClientInformation clientInformation() { - return clientInformation; - } - } - - /** - * Metadata provided for PushConfig requests. + * Computes the CRC that serves to identify a configuration profile. The + * value is generated by calculating a CRC32C of the configuration set. + *
<p>
+ * This default implementation sorts the keys for deterministic ordering and computes + * a CRC32C checksum. Implementations may override this method if a different CRC + * calculation is needed. + * + * @param configKeys The set of configuration keys in the profile + * @return CRC32C checksum of the configuration keys */ - class PushConfigRequestMetadata { - private final ClientInformation clientInformation; - private final Map configs; - - public PushConfigRequestMetadata(ClientInformation clientInformation, - Map configs) { - this.clientInformation = clientInformation; - this.configs = configs; - } - - /** - * Return the client information from the ApiVersionsRequest. - */ - public ClientInformation clientInformation() { - return clientInformation; - } - - /** - * Return the configuration key-value pairs being pushed by the client. - */ - public Map configs() { - return configs; - } + default long configurationProfileCrc(SortedSet configKeys) { + // Sort keys for deterministic ordering + String keysString = configKeys.stream() + .collect(Collectors.joining(",")); + byte[] keysBytes = keysString.getBytes(StandardCharsets.UTF_8); + return Crc32C.compute(keysBytes, 0, keysBytes.length); } /** - * Select which configuration keys the client should send in a subsequent PushConfig request. + * Select the configuration profile keys for a given client profile. + *
<p>
+ * How much of the client profile must match a configuration profile is left up to the + * implementation. It is not a requirement that all of the client profile (name, version, and + * any optional metadata) match exactly. For example, if the configuration of the client doesn't + * change between minor versions, there's no need to provide a distinct configuration profile + * for each minor client profile difference. + *

+ * If this method returns {@code Optional.empty()}, the client will not send a PushConfig request. * - * @param metadata the GetConfigSubscription request metadata - * @return the set of configuration keys to request, or null/empty set if no configs should be requested + * @param clientProfile The client profile information from the request context + * @return Optional containing the configuration profile keys and CRC, or empty if no profile matches + * @throws UnknownConfigProfileException if the client profile did not match any configuration profiles + * and the policy implementation does not allow for that case */ - Set configKeysToRequest(GetConfigSubscriptionRequestMetadata metadata); + Optional profileKeys(ClientProfile clientProfile) + throws UnknownConfigProfileException; /** - * Validate the pushed client configurations. + * Receive the client configuration data for observability, enforcement, etc. + *

+ * This method should avoid blocking. Implementations typically: + *

    + *
+ * <ul>
+ *   <li>Validate configuration values against expected ranges or patterns</li>
+ *   <li>Store configuration snapshots in external storage for auditing</li>
+ *   <li>Track configuration changes over time</li>
+ * </ul>
+ *

+ * Note: this method will not be invoked if the {@code Configs} array + * of the PushConfig request was larger than {@code client.config.max.bytes}. * - * @param metadata the PushConfig request metadata - * @throws PolicyViolationException if the configurations violate the policy + * @param pushConfigData The client profile and configuration data from the PushConfig request + * @throws UnknownConfigProfileException if the configuration profile CRC is invalid or outdated + * @throws ConfigTooLargeException if the configuration data exceeds size limits + * @throws ClientConfigPolicyException if the configurations violate the policy */ - void validate(PushConfigRequestMetadata metadata) throws PolicyViolationException; + void process(ClientPushConfigData pushConfigData) + throws UnknownConfigProfileException, ConfigTooLargeException, ClientConfigPolicyException; /** * Close this policy instance. Default implementation is a no-op. diff --git a/clients/src/main/java/org/apache/kafka/server/policy/ClientConfigProfileKeys.java b/clients/src/main/java/org/apache/kafka/server/policy/ClientConfigProfileKeys.java new file mode 100644 index 0000000000000..d2d76a6c388be --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/server/policy/ClientConfigProfileKeys.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.server.policy; + +import java.util.Objects; +import java.util.SortedSet; + +/** + * Immutable class containing the configuration profile keys. + *

+ * A configuration profile defines the set of configuration keys to capture. The ClientConfigPolicy + * uses the information in the client profile to select the most appropriate configuration profile. + *

+ * The CRC is generated by calculating a CRC32C of the configuration keys and is used to detect + * changes to the configuration profile between GetConfigProfileKeys and PushConfig requests. + */ +public final class ClientConfigProfileKeys { + private final SortedSet keys; + private final long crc; + + /** + * Creates a new ClientConfigProfileKeys. + * + * @param keys The set of configuration keys the client should provide (sorted for deterministic ordering) + * @param crc CRC32C checksum of the configuration keys for profile change detection + */ + public ClientConfigProfileKeys(SortedSet keys, long crc) { + this.keys = keys; + this.crc = crc; + } + + public SortedSet keys() { + return keys; + } + + public long crc() { + return crc; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClientConfigProfileKeys that = (ClientConfigProfileKeys) o; + return crc == that.crc && Objects.equals(keys, that.keys); + } + + @Override + public int hashCode() { + return Objects.hash(keys, crc); + } + + @Override + public String toString() { + return "ClientConfigProfileKeys{" + + "keys=" + keys + + ", crc=" + crc + + '}'; + } +} diff --git a/clients/src/main/java/org/apache/kafka/server/policy/ClientProfile.java b/clients/src/main/java/org/apache/kafka/server/policy/ClientProfile.java new file mode 100644 index 0000000000000..528fea90bc1ac --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/server/policy/ClientProfile.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.server.policy; + +import org.apache.kafka.common.Uuid; + +import java.util.Objects; +import java.util.SortedMap; + +/** + * Immutable class containing the client profile from the RequestContext. + *

+ * A client profile is made up of the tuple of ClientSoftwareName, ClientSoftwareVersion, + * ClientInstanceId, and ClientMetadata. The client profile provides the ClientConfigPolicy + * implementation with a detailed view of the client in use, allowing it to distinguish between + * different client types (e.g., librdkafka 2.12.0 vs Apache Kafka Java 4.4.0 Producer). + */ +public final class ClientProfile { + private final Uuid clientInstanceId; + private final String clientSoftwareName; + private final String clientSoftwareVersion; + private final SortedMap clientMetadata; + + /** + * Creates a new ClientProfile. + * + * @param clientInstanceId Unique identifier for this client instance (UUID v4) + * @param clientSoftwareName The name of the client software (e.g., "apache-kafka-java") + * @param clientSoftwareVersion The version of the client software (e.g., "3.8.0") + * @param clientMetadata Optional metadata as key-value pairs for additional client context + */ + public ClientProfile( + Uuid clientInstanceId, + String clientSoftwareName, + String clientSoftwareVersion, + SortedMap clientMetadata + ) { + this.clientInstanceId = clientInstanceId; + this.clientSoftwareName = clientSoftwareName; + this.clientSoftwareVersion = clientSoftwareVersion; + this.clientMetadata = clientMetadata; + } + + public Uuid clientInstanceId() { + return clientInstanceId; + } + + public String clientSoftwareName() { + return clientSoftwareName; + } + + public String clientSoftwareVersion() { + return clientSoftwareVersion; + } + + public SortedMap clientMetadata() { + return clientMetadata; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClientProfile that = (ClientProfile) o; + return Objects.equals(clientInstanceId, that.clientInstanceId) && + Objects.equals(clientSoftwareName, that.clientSoftwareName) && + Objects.equals(clientSoftwareVersion, that.clientSoftwareVersion) && + 
Objects.equals(clientMetadata, that.clientMetadata); + } + + @Override + public int hashCode() { + return Objects.hash(clientInstanceId, clientSoftwareName, clientSoftwareVersion, clientMetadata); + } + + @Override + public String toString() { + return "ClientProfile{" + + "clientInstanceId=" + clientInstanceId + + ", clientSoftwareName='" + clientSoftwareName + '\'' + + ", clientSoftwareVersion='" + clientSoftwareVersion + '\'' + + ", clientMetadata=" + clientMetadata + + '}'; + } +} diff --git a/clients/src/main/java/org/apache/kafka/server/policy/ClientPushConfigData.java b/clients/src/main/java/org/apache/kafka/server/policy/ClientPushConfigData.java new file mode 100644 index 0000000000000..39806a995fe2d --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/server/policy/ClientPushConfigData.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.server.policy; + +import java.util.Map; +import java.util.Objects; + +/** + * Immutable class containing the PushConfig API data. + *

+ * The client profile and configuration values come directly from the RPC. + * The broker supplies its current timestamp (UTC) for when the request was received. + */ +public final class ClientPushConfigData { + private final ClientProfile clientProfile; + private final Map configs; + private final long timestamp; + + /** + * Creates a new ClientPushConfigData. + * + * @param clientProfile The client profile information from the request context + * @param configs The configuration key-value pairs pushed by the client + * @param timestamp UTC timestamp (milliseconds) when the broker received the request + */ + public ClientPushConfigData( + ClientProfile clientProfile, + Map configs, + long timestamp + ) { + this.clientProfile = clientProfile; + this.configs = configs; + this.timestamp = timestamp; + } + + public ClientProfile clientProfile() { + return clientProfile; + } + + public Map configs() { + return configs; + } + + public long timestamp() { + return timestamp; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClientPushConfigData that = (ClientPushConfigData) o; + return timestamp == that.timestamp && + Objects.equals(clientProfile, that.clientProfile) && + Objects.equals(configs, that.configs); + } + + @Override + public int hashCode() { + return Objects.hash(clientProfile, configs, timestamp); + } + + @Override + public String toString() { + return "ClientPushConfigData{" + + "clientProfile=" + clientProfile + + ", configs=" + configs + + ", timestamp=" + timestamp + + '}'; + } +} diff --git a/clients/src/main/resources/common/message/ApiVersionsRequest.json b/clients/src/main/resources/common/message/ApiVersionsRequest.json index 10145826b2663..489a6c1cf953e 100644 --- a/clients/src/main/resources/common/message/ApiVersionsRequest.json +++ b/clients/src/main/resources/common/message/ApiVersionsRequest.json @@ -24,7 +24,7 @@ // // Version 4 fixes KAFKA-17011, 
which blocked SupportedFeatures.MinVersion in the response from being 0. // - // Version 5 adds ClientSoftwareRole. + // Version 5 adds ClientInstanceId and ClientMetadata for KIP-CFG. "validVersions": "0-5", "flexibleVersions": "3+", "fields": [ @@ -32,7 +32,16 @@ "ignorable": true, "about": "The name of the client." }, { "name": "ClientSoftwareVersion", "type": "string", "versions": "3+", "ignorable": true, "about": "The version of the client." }, - { "name": "ClientSoftwareRole", "type": "string", "versions": "5+", - "ignorable": true, "about": "The role of the client (e.g., producer, consumer, admin)." } + { "name": "ClientInstanceId", "type": "uuid", "versions": "0+", + "ignorable": false, "about": "Unique ID for this client instance, must be set to 0 on the first request." }, + { "name": "ClientMetadata", "type": "[]ClientMetadataEntry", "versions": "5+", + "ignorable": true, "about": "Client metadata as key-value pairs.", + "fields": [ + { "name": "Key", "type": "string", "versions": "5+", + "about": "Client metadata key." }, + { "name": "Value", "type": "string", "versions": "5+", + "about": "Client metadata value." 
} + ] + } ] } diff --git a/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json b/clients/src/main/resources/common/message/GetConfigProfileKeysRequest.json similarity index 79% rename from clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json rename to clients/src/main/resources/common/message/GetConfigProfileKeysRequest.json index 64f92eba1ff2c..931cc290b23b7 100644 --- a/clients/src/main/resources/common/message/GetConfigSubscriptionRequest.json +++ b/clients/src/main/resources/common/message/GetConfigProfileKeysRequest.json @@ -17,13 +17,8 @@ "apiKey": 93, "type": "request", "listeners": ["broker"], - "name": "GetConfigSubscriptionRequest", + "name": "GetConfigProfileKeysRequest", "validVersions": "0", "flexibleVersions": "0+", - "fields": [ - { - "name": "ClientInstanceId", "type": "uuid", "versions": "0+", - "about": "Unique id for this client instance, must be set to all zeros on the first request." - } - ] + "fields": [] } diff --git a/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json b/clients/src/main/resources/common/message/GetConfigProfileKeysResponse.json similarity index 59% rename from clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json rename to clients/src/main/resources/common/message/GetConfigProfileKeysResponse.json index 287d8fe88c63b..8958b4bc8e608 100644 --- a/clients/src/main/resources/common/message/GetConfigSubscriptionResponse.json +++ b/clients/src/main/resources/common/message/GetConfigProfileKeysResponse.json @@ -16,39 +16,33 @@ { "apiKey": 93, "type": "response", - "name": "GetConfigSubscriptionResponse", + "name": "GetConfigProfileKeysResponse", "validVersions": "0", "flexibleVersions": "0+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", - "about": "The duration in milliseconds for which the request was throttled." 
+ "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The error code, or 0 if there was no error." + "about": "The top-level error code." }, { - "name": "ClientInstanceId", "type": "uuid", "versions": "0+", - "about": "Assigned client instance id if request ClientInstanceId was all zeros, else echoes request value." + "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The top-level error message, or null if there was no error." }, { - "name": "SubscriptionId", "type": "int32", "versions": "0+", - "about": "Unique identifier for the current config subscription." + "name": "ConfigurationProfileCrc", "type": "int64", "versions": "0+", + "about": "CRC for the current configuration profile." }, { "name": "ConfigMaxBytes", "type": "int32", "versions": "0+", - "about": "The maximum bytes of config data the broker accepts in PushConfigRequest." + "about": "The maximum number of bytes for ConfigKeys in its serialized form, as specified by client.config.max.bytes." }, { - "name": "ConfigNames", "type": "[]ConfigKey", "versions": "0+", - "about": "The config keys the broker wants to receive.", - "fields": [ - { - "name": "Name", "type": "string", "versions": "0+", - "about": "The config key name. May be '*' for all keys." - } - ] + "name": "ConfigKeys", "type": "[]string", "versions": "0+", + "about": "The client configuration keys the server wants the client to send." 
} ] } \ No newline at end of file diff --git a/clients/src/main/resources/common/message/PushConfigRequest.json b/clients/src/main/resources/common/message/PushConfigRequest.json index 893a453f5cd1a..4c5fc833c750a 100644 --- a/clients/src/main/resources/common/message/PushConfigRequest.json +++ b/clients/src/main/resources/common/message/PushConfigRequest.json @@ -22,28 +22,24 @@ "flexibleVersions": "0+", "fields": [ { - "name": "ClientInstanceId", "type": "uuid", "versions": "0+", - "about": "Unique id for this client instance." + "name": "ConfigurationProfileCrc", "type": "int64", "versions": "0+", + "about": "CRC for the current configuration profile." }, { - "name": "SubscriptionId", "type": "int32", "versions": "0+", - "about": "The subscription ID from GetConfigSubscriptionResponse." - }, - { - "name": "Configs", "type": "[]ClientConfig", "versions": "0+", + "name": "Configs", "type": "[]Config", "versions": "0+", "about": "The client configuration entries.", "fields": [ { - "name": "Name", "type": "string", "versions": "0+", - "about": "The configuration key name." + "name": "ConfigKey", "type": "string", "versions": "0+", + "about": "The configuration key." }, { - "name": "Value", "type": "string", "versions": "0+", - "about": "The configuration value as a string." + "name": "ConfigValue", "type": "string", "versions": "0+", + "about": "The configuration value." }, { - "name": "Type", "type": "int8", "versions": "0+", - "about": "The configuration type: 0=BOOLEAN, 1=STRING, 2=INT, 3=SHORT, 4=LONG, 5=DOUBLE, 6=LIST, 7=CLASS." + "name": "ConfigType", "type": "int16", "versions": "0+", + "about": "Type (from ConfigDef.Type) of the ConfigValue field. Maps to ConfigDef.Type ordinal: 0=BOOLEAN, 1=STRING, 2=INT, 3=SHORT, 4=LONG, 5=DOUBLE, 6=LIST. CLASS(7) and PASSWORD(8) types are intentionally excluded." 
} ] } diff --git a/clients/src/main/resources/common/message/PushConfigResponse.json b/clients/src/main/resources/common/message/PushConfigResponse.json index c64a8b9f8c3cf..11f4207ed210f 100644 --- a/clients/src/main/resources/common/message/PushConfigResponse.json +++ b/clients/src/main/resources/common/message/PushConfigResponse.json @@ -22,25 +22,15 @@ "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", - "about": "The duration in milliseconds for which the request was throttled." + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The error code, or 0 if there was no error." }, { - "name": "ConfigErrors", "type": "[]ConfigError", "versions": "0+", - "about": "Per-config error details, populated when ErrorCode is INVALID_CONFIG.", - "fields": [ - { - "name": "ConfigKey", "type": "string", "versions": "0+", - "about": "The configuration key that caused the error." - }, - { - "name": "ConfigErrorDescription", "type": "string", "versions": "0+", "nullableVersions": "0+", - "about": "Description of the configuration error, or null if no error." - } - ] + "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The top-level error message, or null if there was no error." 
} ] } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java index bf13435cf0a4f..8927d83f48ab2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java @@ -1151,7 +1151,7 @@ public void testReconnectAfterAddressChange() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, null, + time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Connect to one the initial addresses, then change the addresses and disconnect @@ -1212,7 +1212,7 @@ public void testFailedConnectionToFirstAddress() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, null, + time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // First connection attempt should fail @@ -1265,7 +1265,7 @@ public void testFailedConnectionToFirstAddressAfterReconnect() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, 
defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, null, + time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, null, Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Connect to one the initial addresses, then change the addresses and disconnect @@ -1374,7 +1374,7 @@ public void testTelemetryRequest() { NetworkClient client = new NetworkClient(metadataUpdater, null, selector, "mock", Integer.MAX_VALUE, reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, - time, true, new ApiVersions(), null, new LogContext(), new DefaultHostResolver(), mockClientTelemetrySender, null, null, + time, true, new ApiVersions(), null, new LogContext(), new DefaultHostResolver(), mockClientTelemetrySender, null, Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Send the ApiVersionsRequest diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index 4ad2681de8f2d..0fd164ec3b272 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -1032,7 +1032,7 @@ private[kafka] class Processor( channel.channelMetadataRegistry.registerClientInformation(new ClientInformation( apiVersionsRequest.data.clientSoftwareName, apiVersionsRequest.data.clientSoftwareVersion, - apiVersionsRequest.data.clientSoftwareRole)) + null)) } } requestChannel.sendRequest(req) diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index a59bae567b013..e345797fb874c 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -66,7 +66,6 @@ import 
org.apache.kafka.server.authorizer._ import org.apache.kafka.server.common.{GroupVersion, RequestLocal, ShareVersion, StreamsVersion, TransactionVersion} import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.policy.ClientConfigPolicy -import org.apache.kafka.common.errors.PolicyViolationException import org.apache.kafka.server.share.context.ShareFetchContext import org.apache.kafka.server.share.{ErroneousAndValidPartitionData, SharePartitionKey} import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch @@ -80,7 +79,7 @@ import java.util import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.{CompletableFuture, ConcurrentHashMap} import java.util.stream.Collectors -import java.util.{Collections, Optional, UUID} +import java.util.{Collections, Optional} import scala.annotation.nowarn import scala.collection.mutable.ArrayBuffer import scala.collection.{Map, Seq, Set, mutable} @@ -235,7 +234,7 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.CONSUMER_GROUP_HEARTBEAT => handleConsumerGroupHeartbeat(request).exceptionally(handleError) case ApiKeys.CONSUMER_GROUP_DESCRIBE => handleConsumerGroupDescribe(request).exceptionally(handleError) case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => handleDescribeTopicPartitionsRequest(request) - case ApiKeys.GET_CONFIG_SUBSCRIPTION => handleGetConfigSubscriptionRequest(request) + case ApiKeys.GET_CONFIG_PROFILE_KEYS => handleGetConfigProfileKeysRequest(request) case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => handleGetTelemetrySubscriptionsRequest(request) case ApiKeys.PUSH_CONFIG => handlePushConfigRequest(request) case ApiKeys.PUSH_TELEMETRY => handlePushTelemetryRequest(request) @@ -2989,78 +2988,120 @@ class KafkaApis(val requestChannel: RequestChannel, } } - def handleGetConfigSubscriptionRequest(request: RequestChannel.Request): Unit = { + def handleGetConfigProfileKeysRequest(request: RequestChannel.Request): Unit = { 
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) val clientInfo = request.context.clientInformation - val configKeys: Set[String] = clientConfigPolicy match { + // TODO: Extract clientInstanceId and clientMetadata from ApiVersionsRequest context + // For now, using placeholder values - need to update RequestContext/ClientInformation + // to store these fields from ApiVersionsRequest v5 + val clientInstanceId = org.apache.kafka.common.Uuid.ZERO_UUID // TODO: Get from context + val clientMetadata = new java.util.TreeMap[String, String]() // TODO: Get from context + + val clientProfile = new org.apache.kafka.server.policy.ClientProfile( + clientInstanceId, + clientInfo.softwareName, + clientInfo.softwareVersion, + clientMetadata + ) + + val (errorCode, errorMessage, crc, configKeys) = clientConfigPolicy match { case Some(policy) => - val metadata = new ClientConfigPolicy.GetConfigSubscriptionRequestMetadata(clientInfo) - val keys = policy.configKeysToRequest(metadata) - if (keys == null) Set.empty else keys.asScala.toSet + try { + val profileKeysOpt = policy.profileKeys(clientProfile) + OptionConverters.toScala(profileKeysOpt) match { + case Some(profileKeys) => + val keys: List[String] = profileKeys.keys().asScala.toList + (Errors.NONE.code, null, profileKeys.crc(), keys) + case None => + // No profile matched, return empty config keys + (Errors.NONE.code, null, 0L, List.empty[String]) + } + } catch { + case e: org.apache.kafka.common.errors.UnknownConfigProfileException => + (Errors.UNKNOWN_CONFIG_PROFILE.code, e.getMessage, 0L, List.empty[String]) + case e: Exception => + (Errors.UNKNOWN_SERVER_ERROR.code, e.getMessage, 0L, List.empty[String]) + } case None => - Set.empty + // No policy configured, return empty config keys + (Errors.NONE.code, null, 0L, List.empty[String]) } - val subscriptionId = math.abs(UUID.randomUUID().hashCode()) - val configNamesList = configKeys.map { name => - new GetConfigSubscriptionResponseData.ConfigKey().setName(name) - 
}.toList.asJava + val configKeysList = configKeys.asJava - val responseData = new GetConfigSubscriptionResponseData() - .setSubscriptionId(subscriptionId) + val responseData = new GetConfigProfileKeysResponseData() + .setErrorCode(errorCode.toShort) + .setErrorMessage(errorMessage) + .setConfigurationProfileCrc(crc) .setConfigMaxBytes(clientConfigMaxBytes) - .setConfigNames(configNamesList) + .setConfigKeys(configKeysList) requestHelper.sendResponseMaybeThrottle(request, - requestThrottleMs => new GetConfigSubscriptionResponse(responseData)) + requestThrottleMs => new GetConfigProfileKeysResponse(responseData.setThrottleTimeMs(requestThrottleMs))) } def handlePushConfigRequest(request: RequestChannel.Request): Unit = { authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) val clientInfo = request.context.clientInformation - val configsData = request.body[PushConfigRequest].data.configs.asScala - val configs = configsData.map(c => c.name -> c.value).toMap + val requestData = request.body[PushConfigRequest].data + val configsData = requestData.configs.asScala + val configs = configsData.map(c => c.configKey -> c.configValue).toMap // Calculate size val configsSize = configs.foldLeft(0)((acc, kv) => acc + kv._1.getBytes(StandardCharsets.UTF_8).length + kv._2.getBytes(StandardCharsets.UTF_8).length) - val responseData = if (configsSize > clientConfigMaxBytes) { - new PushConfigResponseData() - .setErrorCode(Errors.CONFIG_TOO_LARGE.code) + val (errorCode, errorMessage) = if (configsSize > clientConfigMaxBytes) { + (Errors.CONFIG_TOO_LARGE.code, s"Configuration payload size ($configsSize bytes) exceeds limit ($clientConfigMaxBytes bytes)") } else { clientConfigPolicy match { case Some(policy) => try { - val metadata = new ClientConfigPolicy.PushConfigRequestMetadata(clientInfo, configs.asJava) - policy.validate(metadata) - new PushConfigResponseData() - .setErrorCode(Errors.NONE.code) + // TODO: Extract clientInstanceId and clientMetadata from ApiVersionsRequest 
context + val clientInstanceId = org.apache.kafka.common.Uuid.ZERO_UUID // TODO: Get from context + val clientMetadata = new java.util.TreeMap[String, String]() // TODO: Get from context + + val clientProfile = new org.apache.kafka.server.policy.ClientProfile( + clientInstanceId, + clientInfo.softwareName, + clientInfo.softwareVersion, + clientMetadata + ) + + val timestamp = time.milliseconds() + val pushConfigData = new org.apache.kafka.server.policy.ClientPushConfigData( + clientProfile, + configs.asJava, + timestamp + ) + + policy.process(pushConfigData) + (Errors.NONE.code, null) } catch { - case e: PolicyViolationException => - // Parse validation errors into per-config errors - val configErrors = new util.ArrayList[PushConfigResponseData.ConfigError]() - // For now, single error - future: parse e.getMessage() for per-config details - configErrors.add(new PushConfigResponseData.ConfigError() - .setConfigKey("") - .setConfigErrorDescription(e.getMessage)) - - new PushConfigResponseData() - .setErrorCode(Errors.INVALID_CONFIG.code) - .setConfigErrors(configErrors) + case e: org.apache.kafka.common.errors.UnknownConfigProfileException => + (Errors.UNKNOWN_CONFIG_PROFILE.code, e.getMessage) + case e: org.apache.kafka.common.errors.ConfigTooLargeException => + (Errors.CONFIG_TOO_LARGE.code, e.getMessage) + case e: org.apache.kafka.common.errors.ClientConfigPolicyException => + (Errors.INVALID_CONFIG.code, e.getMessage) + case e: Exception => + (Errors.UNKNOWN_SERVER_ERROR.code, e.getMessage) } case None => // No policy configured, accept all - new PushConfigResponseData() - .setErrorCode(Errors.NONE.code) + (Errors.NONE.code, null) } } + val responseData = new PushConfigResponseData() + .setErrorCode(errorCode) + .setErrorMessage(errorMessage) + requestHelper.sendResponseMaybeThrottle(request, - requestThrottleMs => new PushConfigResponse(responseData)) + requestThrottleMs => new PushConfigResponse(responseData.setThrottleTimeMs(requestThrottleMs))) } /** diff 
--git a/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java b/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java index a48c0abe07a94..453dae17da34b 100644 --- a/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java +++ b/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java @@ -169,7 +169,7 @@ public class ServerLogConfigs { "The class should implement the org.apache.kafka.server.policy.ClientConfigPolicy interface."; public static final String CLIENT_CONFIG_MAX_BYTES_CONFIG = "client.config.max.bytes"; - public static final int CLIENT_CONFIG_MAX_BYTES_DEFAULT = 10240; // 10KB + public static final int CLIENT_CONFIG_MAX_BYTES_DEFAULT = 1048576; // 1MB public static final String CLIENT_CONFIG_MAX_BYTES_DOC = "Maximum size in bytes for client configuration data in PushConfig requests. " + "Requests exceeding this limit will be rejected with CONFIG_TOO_LARGE error."; From 4901900f9bd8c9d7c570d63dac6a339498c0f3cb Mon Sep 17 00:00:00 2001 From: Kirk True Date: Fri, 27 Mar 2026 10:24:09 -0700 Subject: [PATCH 10/10] getting things to compile --- .../ClientConfigPushIntegrationTest.java | 44 +++++++++---------- .../kafka/clients/ClientsTestUtils.java | 2 - 2 files changed, 21 insertions(+), 25 deletions(-) diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientConfigPushIntegrationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientConfigPushIntegrationTest.java index 5e344d3723040..fee93d640df1c 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientConfigPushIntegrationTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientConfigPushIntegrationTest.java @@ -37,7 +37,6 @@ import org.apache.kafka.server.policy.ClientConfigProfileKeys; import org.apache.kafka.server.policy.ClientProfile; import 
org.apache.kafka.server.policy.ClientPushConfigData; -f import org.junit.jupiter.api.BeforeEach; import java.util.Collections; @@ -83,11 +82,7 @@ public ClientConfigPushIntegrationTest(ClusterInstance clusterInstance) { @BeforeEach public void setup() throws InterruptedException { clusterInstance.waitForReadyBrokers(); - // Reset test state - receivedProfiles.clear(); - receivedConfigs.clear(); - profileKeysCallCount.set(0); - processCallCount.set(0); + clientConfigPolicy = new TestClientConfigPolicy(); } @ClusterTest @@ -104,17 +99,17 @@ public void testProducerConfigPushHandshake() throws Exception { Thread.sleep(2000); // Verify policy was called - assertTrue(profileKeysCallCount.get() > 0, "profileKeys() should have been called"); - assertTrue(processCallCount.get() > 0, "process() should have been called"); + assertTrue(clientConfigPolicy.profileKeysCallCount.get() > 0, "profileKeys() should have been called"); + assertTrue(clientConfigPolicy.processCallCount.get() > 0, "process() should have been called"); // Verify we received a client profile - assertFalse(receivedProfiles.isEmpty(), "Should have received client profile"); + assertFalse(clientConfigPolicy.receivedProfiles.isEmpty(), "Should have received client profile"); // Verify we received configs - assertFalse(receivedConfigs.isEmpty(), "Should have received client configs"); + assertFalse(clientConfigPolicy.receivedConfigs.isEmpty(), "Should have received client configs"); // Verify configs were collected (should have client.id but not bootstrap.servers) - Map configs = receivedConfigs.values().iterator().next(); + Map configs = clientConfigPolicy.receivedConfigs.values().iterator().next(); assertTrue(configs.containsKey(ProducerConfig.CLIENT_ID_CONFIG), "Should contain client.id config"); assertFalse(configs.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG), @@ -137,13 +132,13 @@ public void testConsumerConfigPushHandshake() throws Exception { Thread.sleep(2000); // Verify policy was called - 
assertTrue(profileKeysCallCount.get() > 0, "profileKeys() should have been called"); - assertTrue(processCallCount.get() > 0, "process() should have been called"); + assertTrue(clientConfigPolicy.profileKeysCallCount.get() > 0, "profileKeys() should have been called"); + assertTrue(clientConfigPolicy.processCallCount.get() > 0, "process() should have been called"); // Verify we received configs - assertFalse(receivedConfigs.isEmpty(), "Should have received client configs"); + assertFalse(clientConfigPolicy.receivedConfigs.isEmpty(), "Should have received client configs"); - Map configs = receivedConfigs.values().iterator().next(); + Map configs = clientConfigPolicy.receivedConfigs.values().iterator().next(); assertTrue(configs.containsKey(ConsumerConfig.GROUP_ID_CONFIG), "Should contain group.id config"); assertTrue(configs.containsKey(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), @@ -154,7 +149,7 @@ public void testConsumerConfigPushHandshake() throws Exception { @ClusterTest public void testConfigPushWithEmptyProfile() throws Exception { // Configure policy to return empty profile (no config keys requested) - returnEmptyProfile = true; + clientConfigPolicy.returnEmptyProfile = true; Map producerConfig = new java.util.HashMap<>(); producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); @@ -167,10 +162,10 @@ public void testConfigPushWithEmptyProfile() throws Exception { Thread.sleep(2000); // profileKeys() should be called - assertTrue(profileKeysCallCount.get() > 0, "profileKeys() should have been called"); + assertTrue(clientConfigPolicy.profileKeysCallCount.get() > 0, "profileKeys() should have been called"); // But process() should NOT be called since no configs were requested - assertEquals(0, processCallCount.get(), "process() should NOT have been called with empty profile"); + assertEquals(0, clientConfigPolicy.processCallCount.get(), "process() should NOT have been called with empty profile"); } } @@ -188,8 +183,8 @@ 
public void testConfigPushDisabled() throws Exception { Thread.sleep(2000); // Policy should NOT be called when disabled - assertEquals(0, profileKeysCallCount.get(), "profileKeys() should NOT be called when disabled"); - assertEquals(0, processCallCount.get(), "process() should NOT be called when disabled"); + assertEquals(0, clientConfigPolicy.profileKeysCallCount.get(), "profileKeys() should NOT be called when disabled"); + assertEquals(0, clientConfigPolicy.processCallCount.get(), "process() should NOT be called when disabled"); } } @@ -205,8 +200,8 @@ public void testClientProfileContainsMetadata() throws Exception { try (Producer producer = new KafkaProducer<>(producerConfig)) { Thread.sleep(2000); - assertFalse(receivedProfiles.isEmpty(), "Should have received client profile"); - ClientProfile profile = receivedProfiles.values().iterator().next(); + assertFalse(clientConfigPolicy.receivedProfiles.isEmpty(), "Should have received client profile"); + ClientProfile profile = clientConfigPolicy.receivedProfiles.values().iterator().next(); assertNotNull(profile.clientInstanceId(), "Client instance ID should not be null"); assertNotNull(profile.clientSoftwareName(), "Client software name should not be null"); @@ -231,7 +226,7 @@ public void testCrcBasedProfileChangeDetection() throws Exception { Thread.sleep(2000); // Verify CRC was computed and is non-zero - assertTrue(profileKeysCallCount.get() > 0, "profileKeys() should have been called"); + assertTrue(clientConfigPolicy.profileKeysCallCount.get() > 0, "profileKeys() should have been called"); // The CRC should be deterministic based on the keys // (We can't easily verify the exact value here, but we verified it's used) @@ -252,6 +247,9 @@ public static class TestClientConfigPolicy implements ClientConfigPolicy { private final Map> receivedConfigs = new ConcurrentHashMap<>(); private final AtomicInteger profileKeysCallCount = new AtomicInteger(0); private final AtomicInteger processCallCount = new 
AtomicInteger(0); + private boolean returnEmptyProfile; + private boolean throwUnknownProfileOnKeys; + private boolean rejectNextPush; private final SortedSet standardConfigKeys = new TreeSet<>(Set.of( "client.id", diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java index a41450e09f098..5635f6e36b756 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java @@ -59,8 +59,6 @@ public class ClientsTestUtils { - fasdfsdadfsa; - private static final String KEY_PREFIX = "key "; private static final String VALUE_PREFIX = "value ";