package csv
Type Members
- class CSVDataSourceV2 extends FileDataSourceV2
- case class CSVPartitionReaderFactory(sqlConf: SQLConf, broadcastedConf: Broadcast[SerializableConfiguration], dataSchema: StructType, readDataSchema: StructType, partitionSchema: StructType, options: CSVOptions, filters: Seq[Filter]) extends FilePartitionReaderFactory with Product with Serializable
A factory used to create CSV readers.
- sqlConf
SQL configuration.
- broadcastedConf
Broadcasted serializable Hadoop Configuration.
- dataSchema
Schema of CSV files.
- readDataSchema
Required data schema in the batch scan.
- partitionSchema
Schema of partitions.
- options
Options for parsing CSV files.
- filters
Filters pushed down to the partition reader.
- case class CSVScan(sparkSession: SparkSession, fileIndex: PartitioningAwareFileIndex, dataSchema: StructType, readDataSchema: StructType, readPartitionSchema: StructType, options: CaseInsensitiveStringMap, pushedFilters: Array[Filter], partitionFilters: Seq[Expression] = Seq.empty, dataFilters: Seq[Expression] = Seq.empty) extends TextBasedFileScan with Product with Serializable
- case class CSVScanBuilder(sparkSession: SparkSession, fileIndex: PartitioningAwareFileIndex, schema: StructType, dataSchema: StructType, options: CaseInsensitiveStringMap) extends FileScanBuilder with Product with Serializable
- case class CSVTable(name: String, sparkSession: SparkSession, options: CaseInsensitiveStringMap, paths: Seq[String], userSpecifiedSchema: Option[StructType], fallbackFileFormat: Class[_ <: FileFormat]) extends FileTable with Product with Serializable
- case class CSVWrite(paths: Seq[String], formatName: String, supportsDataType: (DataType) => Boolean, info: LogicalWriteInfo) extends FileWrite with Product with Serializable