final case class ConsumerSettings(properties: Map[String, AnyRef] = Map.empty, closeTimeout: zio.Duration = 30.seconds, pollTimeout: zio.Duration = 50.millis, commitTimeout: zio.Duration = ConsumerSettings.defaultCommitTimeout, offsetRetrieval: OffsetRetrieval = OffsetRetrieval.Auto(), rebalanceListener: RebalanceListener = RebalanceListener.noop, restartStreamOnRebalancing: Boolean = false, rebalanceSafeCommits: Boolean = false, maxRebalanceDuration: Option[zio.Duration] = None, fetchStrategy: FetchStrategy = QueueSizeBasedFetchStrategy(), metricLabels: Set[MetricLabel] = Set.empty, runloopMetricsSchedule: Schedule[Any, Unit, Long] = Schedule.fixed(500.millis), authErrorRetrySchedule: Schedule[Any, Throwable, Any] = Schedule.recurs(5) && Schedule.spaced(500.millis), maxStreamPullIntervalOption: Option[zio.Duration] = None) extends Product with Serializable
Settings for the consumer.
To stay source compatible with future releases, you are recommended to construct the settings as follows:
ConsumerSettings(bootstrapServers) .withGroupId(groupId) .withProperties(properties) .... etc.
- Alphabetic
- By Inheritance
- ConsumerSettings
- Serializable
- Product
- Equals
- AnyRef
- Any
- Hide All
- Show All
- Public
- Protected
Instance Constructors
- new ConsumerSettings(properties: Map[String, AnyRef] = Map.empty, closeTimeout: zio.Duration = 30.seconds, pollTimeout: zio.Duration = 50.millis, commitTimeout: zio.Duration = ConsumerSettings.defaultCommitTimeout, offsetRetrieval: OffsetRetrieval = OffsetRetrieval.Auto(), rebalanceListener: RebalanceListener = RebalanceListener.noop, restartStreamOnRebalancing: Boolean = false, rebalanceSafeCommits: Boolean = false, maxRebalanceDuration: Option[zio.Duration] = None, fetchStrategy: FetchStrategy = QueueSizeBasedFetchStrategy(), metricLabels: Set[MetricLabel] = Set.empty, runloopMetricsSchedule: Schedule[Any, Unit, Long] = Schedule.fixed(500.millis), authErrorRetrySchedule: Schedule[Any, Throwable, Any] = Schedule.recurs(5) && Schedule.spaced(500.millis), maxStreamPullIntervalOption: Option[zio.Duration] = None)
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- val authErrorRetrySchedule: Schedule[Any, Throwable, Any]
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.CloneNotSupportedException]) @IntrinsicCandidate() @native()
- val closeTimeout: zio.Duration
- val commitTimeout: zio.Duration
- def driverSettings: Map[String, AnyRef]
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- val fetchStrategy: FetchStrategy
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @IntrinsicCandidate() @native()
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- val maxRebalanceDuration: Option[zio.Duration]
- val maxStreamPullIntervalOption: Option[zio.Duration]
- val metricLabels: Set[MetricLabel]
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @IntrinsicCandidate() @native()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @IntrinsicCandidate() @native()
- val offsetRetrieval: OffsetRetrieval
- val pollTimeout: zio.Duration
- def productElementNames: Iterator[String]
- Definition Classes
- Product
- val properties: Map[String, AnyRef]
- val rebalanceListener: RebalanceListener
- val rebalanceSafeCommits: Boolean
- val restartStreamOnRebalancing: Boolean
- val runloopMetricsSchedule: Schedule[Any, Unit, Long]
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- def tuneForHighThroughput: ConsumerSettings
Tunes the consumer for high throughput.
Sets poll timeout to 500 ms,
`max.poll.records` to 2000 and `partitionPreFetchBufferLimit` to 4096. - def tuneForLowLatency: ConsumerSettings
Tunes the consumer for low latency.
Sets poll timeout to 50 ms,
`max.poll.records` to 100 and `partitionPreFetchBufferLimit` to 512. - final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- def withAuthErrorRetrySchedule(authErrorRetrySchedule: Schedule[Any, Throwable, Any]): ConsumerSettings
- authErrorRetrySchedule
The schedule at which the consumer will retry polling the broker for more records, even though a poll fails with an org.apache.kafka.common.errors.AuthorizationException or org.apache.kafka.common.errors.AuthenticationException. This setting helps with failed polls due to too slow authorization or authentication in the broker. You may also consider increasing
`pollTimeout` to reduce auth-work on the broker. Set to `Schedule.stop` to fail the consumer on the first auth error. The default is `Schedule.recurs(5) && Schedule.spaced(500.millis)`,
which is, to retry 5 times, spaced by 500 ms.
- def withBootstrapServers(servers: List[String]): ConsumerSettings
- def withClientId(clientId: String): ConsumerSettings
- def withCloseTimeout(timeout: zio.Duration): ConsumerSettings
- def withCommitTimeout(timeout: zio.Duration): ConsumerSettings
- def withCredentials(credentialsStore: KafkaCredentialStore): ConsumerSettings
- def withFetchStrategy(fetchStrategy: FetchStrategy): ConsumerSettings
- fetchStrategy
The fetch strategy which selects which partitions can fetch data in the next poll. The default is to use the zio.kafka.consumer.fetch.QueueSizeBasedFetchStrategy with a
`partitionPreFetchBufferLimit` parameter of 1024, which is calculated by taking 2 * the default `max.poll.records` of 500, rounded to the nearest power of 2.
- def withGroupId(groupId: String): ConsumerSettings
- def withGroupInstanceId(groupInstanceId: String): ConsumerSettings
- def withMaxPollInterval(maxPollInterval: zio.Duration): ConsumerSettings
Set Kafka's `max.poll.interval.ms` configuration. See https://kafka.apache.org/documentation/#consumerconfigs_max.poll.interval.ms for more information. The default is 5 minutes. Make sure that all records from a single poll can be processed in this interval. See also the maxPollRecords configuration.
- def withMaxPollRecords(maxPollRecords: Int): ConsumerSettings
Set Kafka's `max.poll.records` configuration. See https://kafka.apache.org/documentation/#consumerconfigs_max.poll.records for more information. The default is 500.
- def withMaxRebalanceDuration(value: zio.Duration): ConsumerSettings
- value
Maximum time spent in the rebalance callback when `rebalanceSafeCommits` is enabled. In this time zio-kafka awaits processing of records and the completion of commits. By default this value is set to 3/5 of `maxPollInterval`, which by default calculates to 3 minutes. Only values between `commitTimeout` and `maxPollInterval` are useful. Lower values will make the rebalance callback be done immediately, higher values lead to lost partitions. See #withRebalanceSafeCommits for more information.
- def withMaxStreamPullInterval(maxStreamPullInterval: zio.Duration): ConsumerSettings
The maximum time a stream may run without pulling a chunk of records.
Zio-kafka uses this value to determine whether a stream stopped processing. This is to safeguard against alive consumers in the consumer group which hold partition assignments but make no progress. If no chunks are pulled by user code from a partition stream for this interval (while data is available) we consider the stream to be halted. When this happens we interrupt the stream with a failure. In addition, the entire consumer is shutdown. In future versions of zio-kafka we may (instead of a shutdown) stop only the affected subscription.
Make sure that all records from a single poll (see maxPollRecords) can be processed in this interval, even when there is no concurrency because the records are all in the same partition.
The default is equal to maxPollInterval.
- def withMetricsLabels(metricLabels: Set[MetricLabel]): ConsumerSettings
- metricLabels
The labels given to all metrics collected by zio-kafka. By default no labels are set. For applications with multiple consumers it is recommended to set some metric labels. For example, if one is used, the consumer group id could be used as a label:
consumerSettings.withMetricsLabels(Set(MetricLabel("group-id", groupId)))
- def withOffsetRetrieval(retrieval: OffsetRetrieval): ConsumerSettings
Which offset to start consuming from for new partitions.
The options are:
import zio.kafka.consumer.Consumer._ OffsetRetrieval.Auto(AutoOffsetStrategy.Latest) // the default OffsetRetrieval.Auto(AutoOffsetStrategy.Earliest) OffsetRetrieval.Auto(AutoOffsetStrategy.None) OffsetRetrieval.Manual(getOffsets, defaultStrategy)
The `Auto` options make consuming start from the latest committed offset. When no committed offset is available, the given offset strategy is used and consuming starts from the `Latest` offset (the default), the `Earliest` offset, or results in an error for `None`.
The `Manual` option allows fine grained control over which offset to consume from. The provided `getOffsets` function should return an offset for each topic-partition that is being assigned. When the returned offset is smaller than the log start offset or larger than the log end offset, the `defaultStrategy` is used and consuming starts from the `Latest` offset (the default), the `Earliest` offset, or results in an error for `None`. When the returned map does not contain an entry for a topic-partition, the consumer will continue from the last committed offset. When no committed offset is available, the `defaultStrategy` is used and consuming starts from the `Latest` offset (the default), the `Earliest` offset, or results in an error for `None`. This configuration applies to both subscribed and assigned partitions.
This method sets the `auto.offset.reset` Kafka configuration. See https://kafka.apache.org/documentation/#consumerconfigs_auto.offset.reset for more information. - def withPartitionPreFetchBufferLimit(partitionPreFetchBufferLimit: Int): ConsumerSettings
- partitionPreFetchBufferLimit
The queue size at or below which more records are fetched and buffered (per partition). This buffer improves throughput and supports varying downstream message processing time, while maintaining some backpressure. Large values effectively disable backpressure at the cost of high memory usage, low values will effectively disable prefetching in favor of low memory consumption. The number of records that is fetched on every poll is controlled by the `max.poll.records` setting; the number of records fetched for every partition is somewhere between 0 and `max.poll.records`. The default value for this parameter is 1024. It is calculated by taking 2 * the default `max.poll.records` of 500, rounded to the nearest power of 2. The value `0` disables pre-fetching.
- def withPollTimeout(timeout: zio.Duration): ConsumerSettings
The maximum time to block while polling the Kafka consumer. The Kafka consumer will return earlier when the maximum number of records to poll (see https://kafka.apache.org/documentation/#consumerconfigs_max.poll.records) is collected.
The default is
`50ms`, which is good for low latency applications. Set this higher, e.g. `500ms`, for better throughput. - def withProperties(kvs: Map[String, AnyRef]): ConsumerSettings
- def withProperties(kvs: (String, AnyRef)*): ConsumerSettings
- def withProperty(key: String, value: AnyRef): ConsumerSettings
- def withReadCommitted(readCommitted: Boolean = true): ConsumerSettings
Controls how to consume records produced transactionally.
- readCommitted
when `true`, only consume records which have been committed; when `false`, consume all records, even records which are part of an aborted transaction. Non-transactional records will be consumed unconditionally in either mode. Note that Kafka's default is to read all records (`readCommitted = false`).
- def withRebalanceListener(listener: RebalanceListener): ConsumerSettings
- def withRebalanceSafeCommits(value: Boolean): ConsumerSettings
- value
Whether to hold up a rebalance until all offsets of consumed messages have been committed. The default is `false`, but the recommended value is `true` as it prevents duplicate messages. Use `false` when:
- your streams do not commit, or
- your streams require access to the consumer (the consumer is not available until the rebalance is done), or
- when it is okay to process records twice (possibly concurrently), for example, because processing is
idempotent.
When `true`, messages consumed from revoked partitions must be committed before we allow the rebalance to continue. When a partition is revoked, consuming the messages will be taken over by another consumer. The other consumer will continue from the committed offset. It is therefore important that this consumer commits offsets of all consumed messages. Therefore, by holding up the rebalance until these commits are done, we ensure that the new consumer will start from the correct offset. During a rebalance no new messages can be received _for any stream_. Therefore, _all_ streams are deprived of new messages until the revoked streams are done committing. Rebalances are held up for at most 3/5 of `maxPollInterval` (see withMaxPollInterval), by default this calculates to 3 minutes. See #withMaxRebalanceDuration to change the default. External commits (that is, commits to an external system, e.g. a relational database) must be registered to the consumer with Consumer.registerExternalCommits. When `false`, streams for revoked partitions may continue to run even though the rebalance is not held up. Any offset commits from these streams have a high chance of being delayed (commits are not possible during some phases of a rebalance). The consumer that takes over the partition will likely not see these delayed commits and will start from an earlier offset. The result is that some messages are processed twice and concurrently.
- def withRunloopMetricsSchedule(runloopMetricsSchedule: Schedule[Any, Unit, Long]): ConsumerSettings
- runloopMetricsSchedule
The schedule at which the runloop metrics are measured. Example runloop metrics are queue sizes and number of outstanding commits. The default is to measure every 500ms.
- def withoutPartitionPreFetching: ConsumerSettings
Disables partition record pre-fetching.
Deprecated Value Members
- def finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.Throwable]) @Deprecated
- Deprecated
(Since version 9)
- def withMaxPartitionQueueSize(partitionPreFetchBufferLimit: Int): ConsumerSettings
- Annotations
- @deprecated
- Deprecated
(Since version 2.6.0) Use withPartitionPreFetchBufferLimit instead
- def withRestartStreamOnRebalancing(value: Boolean): ConsumerSettings
- value
When `true`, _all_ streams are restarted during a rebalance, including those streams that are not revoked. The default is `false`.
- Annotations
- @deprecated
- Deprecated
(Since version 2.10.0)
`restartStreamOnRebalancing` will be removed in zio-kafka 3.0