Packages

final case class Comet(env: String, tmpdir: String, datasets: String, metadata: String, metrics: Metrics, audit: Audit, archive: Boolean, sinkToFile: Boolean, lock: Lock, defaultWriteFormat: String, defaultRejectedWriteFormat: String, defaultAuditWriteFormat: String, csvOutput: Boolean, csvOutputExt: String, privacyOnly: Boolean, launcher: String, chewerPrefix: String, rowValidatorClass: String, treeValidatorClass: String, loadStrategyClass: String, analyze: Boolean, hive: Boolean, grouped: Boolean, mergeForceDistinct: Boolean, area: Area, airflow: Airflow, elasticsearch: Elasticsearch, hadoop: Map[String, String], connections: Map[String, Connection], jdbcEngines: Map[String, JdbcEngine], atlas: Atlas, privacy: Privacy, fileSystem: Option[String], metadataFileSystem: Option[String], internal: Option[Internal], udfs: Option[String], assertions: Assertions, kafka: KafkaConfig, sqlParameterPattern: String) extends Serializable with Product

datasets

: Absolute path, datasets root folder beneath which each area is defined.

metadata

: Absolute path, location where all types / domains and auto jobs are defined

metrics

: Absolute path, location where all computed metrics are stored

audit

: Absolute path, location where all logs are stored

archive

: Should we back up the ingested datasets? true by default

defaultWriteFormat

: Choose between parquet, orc ... Default is parquet

defaultRejectedWriteFormat

: Writing format for rejected datasets, choose between parquet, orc ... Default is parquet

defaultAuditWriteFormat

: Writing format for audit datasets, choose between parquet, orc ... Default is parquet

launcher

: Cron Job Manager: simple (useful for testing) or airflow? simple by default

analyze

: Should we create basic Hive statistics on the generated dataset? true by default

hive

: Should we create a Hive Table? true by default

area

: see Area above

airflow

: Airflow endpoint. Should be defined even if the simple launcher is used instead of airflow.

Linear Supertypes
Product, Equals, Serializable, Serializable, AnyRef, Any
Ordering
  1. Alphabetic
  2. By Inheritance
Inherited
  1. Comet
  2. Product
  3. Equals
  4. Serializable
  5. Serializable
  6. AnyRef
  7. Any
  1. Hide All
  2. Show All
Visibility
  1. Public
  2. All

Instance Constructors

  1. new Comet(env: String, tmpdir: String, datasets: String, metadata: String, metrics: Metrics, audit: Audit, archive: Boolean, sinkToFile: Boolean, lock: Lock, defaultWriteFormat: String, defaultRejectedWriteFormat: String, defaultAuditWriteFormat: String, csvOutput: Boolean, csvOutputExt: String, privacyOnly: Boolean, launcher: String, chewerPrefix: String, rowValidatorClass: String, treeValidatorClass: String, loadStrategyClass: String, analyze: Boolean, hive: Boolean, grouped: Boolean, mergeForceDistinct: Boolean, area: Area, airflow: Airflow, elasticsearch: Elasticsearch, hadoop: Map[String, String], connections: Map[String, Connection], jdbcEngines: Map[String, JdbcEngine], atlas: Atlas, privacy: Privacy, fileSystem: Option[String], metadataFileSystem: Option[String], internal: Option[Internal], udfs: Option[String], assertions: Assertions, kafka: KafkaConfig, sqlParameterPattern: String)

    datasets

    : Absolute path, datasets root folder beneath which each area is defined.

    metadata

    : Absolute path, location where all types / domains and auto jobs are defined

    metrics

    : Absolute path, location where all computed metrics are stored

    audit

    : Absolute path, location where all logs are stored

    archive

    : Should we back up the ingested datasets? true by default

    defaultWriteFormat

    : Choose between parquet, orc ... Default is parquet

    defaultRejectedWriteFormat

    : Writing format for rejected datasets, choose between parquet, orc ... Default is parquet

    defaultAuditWriteFormat

    : Writing format for audit datasets, choose between parquet, orc ... Default is parquet

    launcher

    : Cron Job Manager: simple (useful for testing) or airflow? simple by default

    analyze

    : Should we create basic Hive statistics on the generated dataset? true by default

    hive

    : Should we create a Hive Table? true by default

    area

    : see Area above

    airflow

    : Airflow endpoint. Should be defined even if the simple launcher is used instead of airflow.

Value Members

  1. final def !=(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  2. final def ##(): Int
    Definition Classes
    AnyRef → Any
  3. final def ==(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  4. val airflow: Airflow
  5. val analyze: Boolean
  6. val archive: Boolean
  7. val area: Area
  8. final def asInstanceOf[T0]: T0
    Definition Classes
    Any
  9. val assertions: Assertions
  10. val atlas: Atlas
  11. val audit: Audit
  12. val cacheStorageLevel: StorageLevel
  13. val chewerPrefix: String
  14. def clone(): AnyRef
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( ... ) @native()
  15. val connections: Map[String, Connection]
  16. val csvOutput: Boolean
  17. val csvOutputExt: String
  18. val datasets: String
  19. val defaultAuditWriteFormat: String
  20. val defaultRejectedWriteFormat: String
  21. val defaultWriteFormat: String
  22. val elasticsearch: Elasticsearch
  23. val env: String
  24. final def eq(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  25. val fileSystem: Option[String]
  26. def finalize(): Unit
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( classOf[java.lang.Throwable] )
  27. final def getClass(): Class[_]
    Definition Classes
    AnyRef → Any
    Annotations
    @native()
  28. val grouped: Boolean
  29. val hadoop: Map[String, String]
  30. val hive: Boolean
  31. val internal: Option[Internal]
  32. def isElasticsearchSupported(): Boolean
    Annotations
    @JsonIgnore()
  33. final def isInstanceOf[T0]: Boolean
    Definition Classes
    Any
  34. val jdbcEngines: Map[String, JdbcEngine]
  35. val kafka: KafkaConfig
  36. val launcher: String
  37. val loadStrategyClass: String
  38. val lock: Lock
  39. val mergeForceDistinct: Boolean
  40. val metadata: String
  41. val metadataFileSystem: Option[String]
  42. val metrics: Metrics
  43. final def ne(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  44. final def notify(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  45. final def notifyAll(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  46. val privacy: Privacy
  47. val privacyOnly: Boolean
  48. val rowValidatorClass: String
  49. val sinkToFile: Boolean
  50. val sqlParameterPattern: String
  51. final def synchronized[T0](arg0: ⇒ T0): T0
    Definition Classes
    AnyRef
  52. val tmpdir: String
  53. val treeValidatorClass: String
  54. val udfs: Option[String]
  55. final def wait(): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  56. final def wait(arg0: Long, arg1: Int): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  57. final def wait(arg0: Long): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... ) @native()
  58. def writeReplace: AnyRef
    Attributes
    protected
    Annotations
    @throws( classOf[ObjectStreamException] )

Inherited from Product

Inherited from Equals

Inherited from Serializable

Inherited from Serializable

Inherited from AnyRef

Inherited from Any

Ungrouped