Packages

final case class ConsumerSettings(properties: Map[String, AnyRef] = Map.empty, closeTimeout: zio.Duration = 30.seconds, pollTimeout: zio.Duration = 50.millis, commitTimeout: zio.Duration = ConsumerSettings.defaultCommitTimeout, offsetRetrieval: OffsetRetrieval = OffsetRetrieval.Auto(), rebalanceListener: RebalanceListener = RebalanceListener.noop, restartStreamOnRebalancing: Boolean = false, rebalanceSafeCommits: Boolean = false, maxRebalanceDuration: Option[zio.Duration] = None, fetchStrategy: FetchStrategy = QueueSizeBasedFetchStrategy(), metricLabels: Set[MetricLabel] = Set.empty, runloopMetricsSchedule: Schedule[Any, Unit, Long] = Schedule.fixed(500.millis), authErrorRetrySchedule: Schedule[Any, Throwable, Any] = Schedule.recurs(5) && Schedule.spaced(500.millis), maxStreamPullIntervalOption: Option[zio.Duration] = None) extends Product with Serializable

Settings for the consumer.

To stay source compatible with future releases, it is recommended to construct the settings as follows:

ConsumerSettings(bootstrapServers)
  .withGroupId(groupId)
  .withProperties(properties)
  .... etc.
Linear Supertypes
Ordering
  1. Alphabetic
  2. By Inheritance
Inherited
  1. ConsumerSettings
  2. Serializable
  3. Product
  4. Equals
  5. AnyRef
  6. Any
  1. Hide All
  2. Show All
Visibility
  1. Public
  2. Protected

Instance Constructors

  1. new ConsumerSettings(properties: Map[String, AnyRef] = Map.empty, closeTimeout: zio.Duration = 30.seconds, pollTimeout: zio.Duration = 50.millis, commitTimeout: zio.Duration = ConsumerSettings.defaultCommitTimeout, offsetRetrieval: OffsetRetrieval = OffsetRetrieval.Auto(), rebalanceListener: RebalanceListener = RebalanceListener.noop, restartStreamOnRebalancing: Boolean = false, rebalanceSafeCommits: Boolean = false, maxRebalanceDuration: Option[zio.Duration] = None, fetchStrategy: FetchStrategy = QueueSizeBasedFetchStrategy(), metricLabels: Set[MetricLabel] = Set.empty, runloopMetricsSchedule: Schedule[Any, Unit, Long] = Schedule.fixed(500.millis), authErrorRetrySchedule: Schedule[Any, Throwable, Any] = Schedule.recurs(5) && Schedule.spaced(500.millis), maxStreamPullIntervalOption: Option[zio.Duration] = None)

Value Members

  1. final def !=(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  2. final def ##: Int
    Definition Classes
    AnyRef → Any
  3. final def ==(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  4. final def asInstanceOf[T0]: T0
    Definition Classes
    Any
  5. val authErrorRetrySchedule: Schedule[Any, Throwable, Any]
  6. def clone(): AnyRef
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws(classOf[java.lang.CloneNotSupportedException]) @IntrinsicCandidate() @native()
  7. val closeTimeout: zio.Duration
  8. val commitTimeout: zio.Duration
  9. def driverSettings: Map[String, AnyRef]
  10. final def eq(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  11. val fetchStrategy: FetchStrategy
  12. final def getClass(): Class[_ <: AnyRef]
    Definition Classes
    AnyRef → Any
    Annotations
    @IntrinsicCandidate() @native()
  13. final def isInstanceOf[T0]: Boolean
    Definition Classes
    Any
  14. val maxRebalanceDuration: Option[zio.Duration]
  15. val maxStreamPullIntervalOption: Option[zio.Duration]
  16. val metricLabels: Set[MetricLabel]
  17. final def ne(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  18. final def notify(): Unit
    Definition Classes
    AnyRef
    Annotations
    @IntrinsicCandidate() @native()
  19. final def notifyAll(): Unit
    Definition Classes
    AnyRef
    Annotations
    @IntrinsicCandidate() @native()
  20. val offsetRetrieval: OffsetRetrieval
  21. val pollTimeout: zio.Duration
  22. def productElementNames: Iterator[String]
    Definition Classes
    Product
  23. val properties: Map[String, AnyRef]
  24. val rebalanceListener: RebalanceListener
  25. val rebalanceSafeCommits: Boolean
  26. val restartStreamOnRebalancing: Boolean
  27. val runloopMetricsSchedule: Schedule[Any, Unit, Long]
  28. final def synchronized[T0](arg0: => T0): T0
    Definition Classes
    AnyRef
  29. def tuneForHighThroughput: ConsumerSettings

    Tunes the consumer for high throughput.

    Tunes the consumer for high throughput.

    Sets poll timeout to 500 ms, max.poll.records to 2000 and partitionPreFetchBufferLimit to 4096.

    See also

    ConsumerSettings.withPollTimeout

    ConsumerSettings.withMaxPollRecords

    zio.kafka.consumer.fetch.QueueSizeBasedFetchStrategy

  30. def tuneForLowLatency: ConsumerSettings

    Tunes the consumer for low latency.

    Tunes the consumer for low latency.

    Sets poll timeout to 50 ms, max.poll.records to 100 and partitionPreFetchBufferLimit to 512.

    See also

    ConsumerSettings.withPollTimeout

    ConsumerSettings.withMaxPollRecords

    zio.kafka.consumer.fetch.QueueSizeBasedFetchStrategy

  31. final def wait(arg0: Long, arg1: Int): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws(classOf[java.lang.InterruptedException])
  32. final def wait(arg0: Long): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws(classOf[java.lang.InterruptedException]) @native()
  33. final def wait(): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws(classOf[java.lang.InterruptedException])
  34. def withAuthErrorRetrySchedule(authErrorRetrySchedule: Schedule[Any, Throwable, Any]): ConsumerSettings

    authErrorRetrySchedule

    The schedule at which the consumer will retry polling the broker for more records, even when a poll fails with an org.apache.kafka.common.errors.AuthorizationException or org.apache.kafka.common.errors.AuthenticationException. This setting helps with failed polls due to too slow authorization or authentication in the broker. You may also consider increasing pollTimeout to reduce auth-work on the broker. Set to Schedule.stop to fail the consumer on the first auth error. The default is

    Schedule.recurs(5) && Schedule.spaced(500.millis)

    that is, retry 5 times, spaced by 500 ms.

  35. def withBootstrapServers(servers: List[String]): ConsumerSettings
  36. def withClientId(clientId: String): ConsumerSettings
  37. def withCloseTimeout(timeout: zio.Duration): ConsumerSettings
  38. def withCommitTimeout(timeout: zio.Duration): ConsumerSettings
  39. def withCredentials(credentialsStore: KafkaCredentialStore): ConsumerSettings
  40. def withFetchStrategy(fetchStrategy: FetchStrategy): ConsumerSettings

    fetchStrategy

    The fetch strategy which selects which partitions can fetch data in the next poll. The default is to use the zio.kafka.consumer.fetch.QueueSizeBasedFetchStrategy with a partitionPreFetchBufferLimit parameter of 1024, which is calculated by taking 2 * the default max.poll.records of 500, rounded to the nearest power of 2.

  41. def withGroupId(groupId: String): ConsumerSettings
  42. def withGroupInstanceId(groupInstanceId: String): ConsumerSettings
  43. def withMaxPollInterval(maxPollInterval: zio.Duration): ConsumerSettings

    Set Kafka's max.poll.interval.ms configuration.

    Set Kafka's max.poll.interval.ms configuration. See https://kafka.apache.org/documentation/#consumerconfigs_max.poll.interval.ms for more information.

    The default is 5 minutes. Make sure that all records from a single poll can be processed in this interval. See also the maxPollRecords configuration.

  44. def withMaxPollRecords(maxPollRecords: Int): ConsumerSettings

    Set Kafka's max.poll.records configuration.

    Set Kafka's max.poll.records configuration. See https://kafka.apache.org/documentation/#consumerconfigs_max.poll.records for more information.

    The default is 500.

  45. def withMaxRebalanceDuration(value: zio.Duration): ConsumerSettings

    value

    Maximum time spent in the rebalance callback when rebalanceSafeCommits is enabled. In this time zio-kafka awaits processing of records and the completion of commits. By default this value is set to 3/5 of maxPollInterval which by default calculates to 3 minutes. Only values between commitTimeout and maxPollInterval are useful. Lower values will make the rebalance callback be done immediately, higher values lead to lost partitions. See #withRebalanceSafeCommits for more information.

  46. def withMaxStreamPullInterval(maxStreamPullInterval: zio.Duration): ConsumerSettings

    The maximum time a stream may run without pulling a chunk of records.

    The maximum time a stream may run without pulling a chunk of records.

    Zio-kafka uses this value to determine whether a stream stopped processing. This is to safeguard against alive consumers in the consumer group which hold partition assignments but make no progress. If no chunks are pulled by user code from a partition stream for this interval (while data is available) we consider the stream to be halted. When this happens we interrupt the stream with a failure. In addition, the entire consumer is shutdown. In future versions of zio-kafka we may (instead of a shutdown) stop only the affected subscription.

    Make sure that all records from a single poll (see maxPollRecords) can be processed in this interval, even when there is no concurrency because the records are all in the same partition.

    The default is equal to maxPollInterval.

  47. def withMetricsLabels(metricLabels: Set[MetricLabel]): ConsumerSettings

    metricLabels

    The labels given to all metrics collected by zio-kafka. By default no labels are set. For applications with multiple consumers it is recommended to set some metric labels. For example, if one is used, the consumer group id could be used as a label:

    consumerSettings.withMetricsLabels(Set(MetricLabel("group-id", groupId)))
  48. def withOffsetRetrieval(retrieval: OffsetRetrieval): ConsumerSettings

    Which offset to start consuming from for new partitions.

    Which offset to start consuming from for new partitions.

    The options are:

    import zio.kafka.consumer.Consumer._
    OffsetRetrieval.Auto(AutoOffsetStrategy.Latest) // the default
    OffsetRetrieval.Auto(AutoOffsetStrategy.Earliest)
    OffsetRetrieval.Auto(AutoOffsetStrategy.None)
    OffsetRetrieval.Manual(getOffsets, defaultStrategy)

    The Auto options make consuming start from the latest committed offset. When no committed offset is available, the given offset strategy is used and consuming starts from the Latest offset (the default), the Earliest offset, or results in an error for None.

    The Manual option allows fine grained control over which offset to consume from. The provided getOffsets function should return an offset for each topic-partition that is being assigned. When the returned offset is smaller than the log start offset or larger than the log end offset, the defaultStrategy is used and consuming starts from the Latest offset (the default), the Earliest offset, or results in an error for None.

    When the returned map does not contain an entry for a topic-partition, the consumer will continue from the last committed offset. When no committed offset is available, the defaultStrategy is used and consuming starts from the Latest offset (the default), the Earliest offset, or results in an error for None.

    This configuration applies to both subscribed and assigned partitions.

    This method sets the auto.offset.reset Kafka configuration. See https://kafka.apache.org/documentation/#consumerconfigs_auto.offset.reset for more information.

  49. def withPartitionPreFetchBufferLimit(partitionPreFetchBufferLimit: Int): ConsumerSettings

    partitionPreFetchBufferLimit

    The queue size at or below which more records are fetched and buffered (per partition). This buffer improves throughput and supports varying downstream message processing time, while maintaining some backpressure. Large values effectively disable backpressure at the cost of high memory usage, low values will effectively disable prefetching in favor of low memory consumption. The number of records that is fetched on every poll is controlled by the max.poll.records setting, the number of records fetched for every partition is somewhere between 0 and max.poll.records. The default value for this parameter is 1024. It is calculated by taking 2 * the default max.poll.records of 500, rounded to the nearest power of 2. The value 0 disables pre-fetching.

  50. def withPollTimeout(timeout: zio.Duration): ConsumerSettings

    The maximum time to block while polling the Kafka consumer.

    The maximum time to block while polling the Kafka consumer. The Kafka consumer will return earlier when the maximum number of records to poll (see https://kafka.apache.org/documentation/#consumerconfigs_max.poll.records) is collected.

    The default is 50ms which is good for low latency applications. Set this higher, e.g. 500ms for better throughput.

  51. def withProperties(kvs: Map[String, AnyRef]): ConsumerSettings
  52. def withProperties(kvs: (String, AnyRef)*): ConsumerSettings
  53. def withProperty(key: String, value: AnyRef): ConsumerSettings
  54. def withReadCommitted(readCommitted: Boolean = true): ConsumerSettings

    Controls how to consume records produced transactionally.

    Controls how to consume records produced transactionally.

    readCommitted

    when true, only consume records which have been committed, when false, consume all records, even records which are part of an aborted transaction. Non-transactional records will be consumed unconditionally in either mode. Note that Kafka's default is to read all records (readCommitted = false).

  55. def withRebalanceListener(listener: RebalanceListener): ConsumerSettings
  56. def withRebalanceSafeCommits(value: Boolean): ConsumerSettings

    value

    Whether to hold up a rebalance until all offsets of consumed messages have been committed. The default is false, but the recommended value is true as it prevents duplicate messages. Use false when:

    • your streams do not commit, or
    • your streams require access to the consumer (the consumer is not available until the rebalance is done), or
    • when it is okay to process records twice (possibly concurrently), for example, because processing is idempotent.

    When true, messages consumed from revoked partitions must be committed before we allow the rebalance to continue. When a partition is revoked, consuming the messages will be taken over by another consumer. The other consumer will continue from the committed offset. It is therefore important that this consumer commits offsets of all consumed messages. Therefore, by holding up the rebalance until these commits are done, we ensure that the new consumer will start from the correct offset.

    During a rebalance no new messages can be received _for any stream_. Therefore, _all_ streams are deprived of new messages until the revoked streams are done committing. Rebalances are held up for at most 3/5 of maxPollInterval (see withMaxPollInterval), by default this calculates to 3 minutes. See #withMaxRebalanceDuration to change the default.

    External commits (that is, commits to an external system, e.g. a relational database) must be registered to the consumer with Consumer.registerExternalCommits.

    When false, streams for revoked partitions may continue to run even though the rebalance is not held up. Any offset commits from these streams have a high chance of being delayed (commits are not possible during some phases of a rebalance). The consumer that takes over the partition will likely not see these delayed commits and will start from an earlier offset. The result is that some messages are processed twice and concurrently.
  57. def withRunloopMetricsSchedule(runloopMetricsSchedule: Schedule[Any, Unit, Long]): ConsumerSettings

    runloopMetricsSchedule

    The schedule at which the runloop metrics are measured. Example runloop metrics are queue sizes and number of outstanding commits. The default is to measure every 500ms.

  58. def withoutPartitionPreFetching: ConsumerSettings

    Disables partition record pre-fetching.

Deprecated Value Members

  1. def finalize(): Unit
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws(classOf[java.lang.Throwable]) @Deprecated
    Deprecated

    (Since version 9)

  2. def withMaxPartitionQueueSize(partitionPreFetchBufferLimit: Int): ConsumerSettings
    Annotations
    @deprecated
    Deprecated

    (Since version 2.6.0) Use withPartitionPreFetchBufferLimit instead

  3. def withRestartStreamOnRebalancing(value: Boolean): ConsumerSettings

    value

    When true _all_ streams are restarted during a rebalance, including those streams that are not revoked. The default is false.

    Annotations
    @deprecated
    Deprecated

    (Since version 2.10.0) restartStreamOnRebalancing will be removed in zio-kafka 3.0

Inherited from Serializable

Inherited from Product

Inherited from Equals

Inherited from AnyRef

Inherited from Any

Ungrouped