Packages

package csv

Type Members

  1. class CSVDataSourceV2 extends FileDataSourceV2
  2. case class CSVPartitionReaderFactory(sqlConf: SQLConf, broadcastedConf: Broadcast[SerializableConfiguration], dataSchema: StructType, readDataSchema: StructType, partitionSchema: StructType, parsedOptions: CSVOptions, filters: Seq[Filter]) extends FilePartitionReaderFactory with Product with Serializable

    A factory used to create CSV readers.

    sqlConf

    SQL configuration.

    broadcastedConf

    Broadcasted serializable Hadoop Configuration.

    dataSchema

    Schema of CSV files.

    readDataSchema

    Required data schema in the batch scan.

    partitionSchema

    Schema of partitions.

    parsedOptions

    Options for parsing CSV files.

    filters

    Pushed down filters.

  3. case class CSVScan(sparkSession: SparkSession, fileIndex: PartitioningAwareFileIndex, dataSchema: StructType, readDataSchema: StructType, readPartitionSchema: StructType, options: CaseInsensitiveStringMap, pushedFilters: Array[Filter], partitionFilters: Seq[Expression] = Seq.empty, dataFilters: Seq[Expression] = Seq.empty) extends TextBasedFileScan with Product with Serializable
  4. case class CSVScanBuilder(sparkSession: SparkSession, fileIndex: PartitioningAwareFileIndex, schema: StructType, dataSchema: StructType, options: CaseInsensitiveStringMap) extends FileScanBuilder with Product with Serializable
  5. case class CSVTable(name: String, sparkSession: SparkSession, options: CaseInsensitiveStringMap, paths: Seq[String], userSpecifiedSchema: Option[StructType], fallbackFileFormat: Class[_ <: FileFormat]) extends FileTable with Product with Serializable
  6. case class CSVWrite(paths: Seq[String], formatName: String, supportsDataType: (DataType) => Boolean, info: LogicalWriteInfo) extends FileWrite with Product with Serializable

Ungrouped