case class SubscriberConfig[T](batchSize: Int = 100, concurrentRequests: Int = 5, refreshAfterOp: Boolean = false, listener: ResponseListener[T] = ResponseListener.noop, completionFn: () ⇒ Unit = () => (), successFn: () ⇒ Unit = () => (), errorFn: (Throwable) ⇒ Unit = e => (), failureWait: FiniteDuration = 2.seconds, maxAttempts: Int = 5, flushInterval: Option[FiniteDuration] = None, flushAfter: Option[FiniteDuration] = None) extends Product with Serializable
- batchSize
the number of elements to group together per batch aside from the last batch
- concurrentRequests
the number of concurrent batch operations
- refreshAfterOp
if the index should be refreshed after each bulk operation
- listener
a listener which is notified on each acknowledged batch item
- completionFn
a function which is invoked when all sent requests have been acknowledged and the publisher has completed Note: this function is executed regardless of whether there was an error or not, that is, this function is always invoked regardless of the state
- successFn
a function which is only invoked when all operations have completed successfully
- errorFn
a function which is invoked after there is an error
- failureWait
the timeout before re-trying failed requests. Usually a failed request is elasticsearch's way of indicating backpressure, so this parameter determines how long to wait between requests.
- maxAttempts
the max number of times to try a request. If it fails too many times it probably isn't backpressure but an error with the document.
- flushInterval
used to schedule periodic bulk indexing. This can be set to avoid waiting for a complete batch for a long period of time. It also is used if the publisher will never complete. This ensures that all elements are indexed, even if the last batch size is lower than batch size.
- flushAfter
used to schedule an index if no document has been received within the given duration. Once an index is performed (either by this flush value or because docs arrived in time) the flush after schedule is reset.
- Alphabetic
- By Inheritance
- SubscriberConfig
- Serializable
- Serializable
- Product
- Equals
- AnyRef
- Any
- Hide All
- Show All
- Public
- All
Instance Constructors
-
new
SubscriberConfig(batchSize: Int = 100, concurrentRequests: Int = 5, refreshAfterOp: Boolean = false, listener: ResponseListener[T] = ResponseListener.noop, completionFn: () ⇒ Unit = () => (), successFn: () ⇒ Unit = () => (), errorFn: (Throwable) ⇒ Unit = e => (), failureWait: FiniteDuration = 2.seconds, maxAttempts: Int = 5, flushInterval: Option[FiniteDuration] = None, flushAfter: Option[FiniteDuration] = None)
- batchSize
the number of elements to group together per batch aside from the last batch
- concurrentRequests
the number of concurrent batch operations
- refreshAfterOp
if the index should be refreshed after each bulk operation
- listener
a listener which is notified on each acknowledged batch item
- completionFn
a function which is invoked when all sent requests have been acknowledged and the publisher has completed Note: this function is executed regardless of whether there was an error or not, that is, this function is always invoked regardless of the state
- successFn
a function which is only invoked when all operations have completed successfully
- errorFn
a function which is invoked after there is an error
- failureWait
the timeout before re-trying failed requests. Usually a failed request is elasticsearch's way of indicating backpressure, so this parameter determines how long to wait between requests.
- maxAttempts
the max number of times to try a request. If it fails too many times it probably isn't backpressure but an error with the document.
- flushInterval
used to schedule periodic bulk indexing. This can be set to avoid waiting for a complete batch for a long period of time. It also is used if the publisher will never complete. This ensures that all elements are indexed, even if the last batch size is lower than batch size.
- flushAfter
used to schedule an index if no document has been received within the given duration. Once an index is performed (either by this flush value or because docs arrived in time) the flush after schedule is reset.
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
- val batchSize: Int
-
def
clone(): AnyRef
- Attributes
- protected[java.lang]
- Definition Classes
- AnyRef
- Annotations
- @native() @throws( ... )
- val completionFn: () ⇒ Unit
- val concurrentRequests: Int
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- val errorFn: (Throwable) ⇒ Unit
- val failureWait: FiniteDuration
-
def
finalize(): Unit
- Attributes
- protected[java.lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
- val flushAfter: Option[FiniteDuration]
- val flushInterval: Option[FiniteDuration]
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- val listener: ResponseListener[T]
- val maxAttempts: Int
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- val refreshAfterOp: Boolean
- val successFn: () ⇒ Unit
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @throws( ... )