case class MergedParameters(parameters: Map[String, String]) extends Product with Serializable
Adds validators and accessors to the underlying string parameter map.
Linear Supertypes: Serializable, Serializable, Product, Equals, AnyRef, Any
Instance Constructors
- new MergedParameters(parameters: Map[String, String])
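In practice this object is built by the connector from the options passed to the Spark DataFrame reader/writer, but it can also be constructed directly for inspection. A minimal sketch, assuming the class lives under net.snowflake.spark.snowflake.Parameters and that the accessors look keys up by their lowercase names:

```scala
import net.snowflake.spark.snowflake.Parameters.MergedParameters

// Lowercase keys are an assumption about how the accessors look values up.
val params = MergedParameters(Map(
  "sfurl"      -> "myaccount.snowflakecomputing.com",
  "sfuser"     -> "etl_user",
  "sfdatabase" -> "ANALYTICS",
  "sfschema"   -> "PUBLIC"
))

println(params.sfURL)        // "myaccount.snowflakecomputing.com"
println(params.sfDatabase)   // "ANALYTICS"
println(params.autoPushdown) // default behavior when "autopushdown" is not set
```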
Value Members
- final def !=(arg0: Any): Boolean (AnyRef → Any)
- final def ##(): Int (AnyRef → Any)
- final def ==(arg0: Any): Boolean (AnyRef → Any)
- final def asInstanceOf[T0]: T0 (Any)
- def autoPushdown: Boolean
  Whether the automatic query pushdown feature is enabled or disabled.
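Pushdown is normally controlled through the options handed to the DataFrame reader or writer rather than through this accessor. A minimal sketch, assuming the "autopushdown" option key and the connector format name, with illustrative connection values:

```scala
// spark: an active SparkSession (assumed). Connection values are illustrative.
val sfOptions = Map(
  "sfURL"       -> "myaccount.snowflakecomputing.com",
  "sfUser"      -> "etl_user",
  "sfPassword"  -> sys.env("SNOWFLAKE_PASSWORD"),
  "sfDatabase"  -> "ANALYTICS",
  "sfSchema"    -> "PUBLIC",
  "sfWarehouse" -> "ETL_WH"
)

val orders = spark.read
  .format("net.snowflake.spark.snowflake")
  .options(sfOptions)
  .option("autopushdown", "off")   // assumed key: disable query pushdown for this read
  .option("dbtable", "ORDERS")
  .load()
```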
- def awsAccessKey: Option[String]
- def awsSecretKey: Option[String]
- def azureSAS: Option[String]
- def bindVariableEnabled: Boolean
- def checkBucketConfiguration: Boolean
  Returns true if the bucket lifecycle configuration should be checked.
- def checkTableExistenceInCurrentSchemaOnly: Boolean
- def clone(): AnyRef (protected[lang], AnyRef, @throws( ... ) @native())
- def columnMap: Option[Map[String, String]]
  Retrieves column mapping data; None if empty.
- def columnMapping: String
- def columnMismatchBehavior: String
- def continueOnError: Boolean
  Sets the ON_ERROR parameter to CONTINUE in the COPY command. TODO: create a data validation function on the Spark side instead of relying on the COPY command.
- def createPerQueryTempDir(): String
  Creates a per-query subdirectory in the rootTempDir, with a random UUID.
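A small sketch of the relationship to rootTempDir, assuming a "tempdir" option key is configured and that the returned path is a UUID-named child of that directory:

```scala
// "tempdir" is an assumed option key for the root staging directory.
val stagingParams = MergedParameters(Map("tempdir" -> "s3a://my-bucket/snowflake-staging/"))

val dirA = stagingParams.createPerQueryTempDir()
val dirB = stagingParams.createPerQueryTempDir()
println(dirA != dirB)                                // true: each query gets its own UUID
println(dirA.startsWith(stagingParams.rootTempDir))  // true (path layout assumed)
```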
- final def eq(arg0: AnyRef): Boolean (AnyRef)
- def expectedPartitionCount: Int
- def expectedPartitionSize: Long
- def extraCopyOptions: String
  Extra options to append to the Snowflake COPY command (e.g. "MAXERROR 100").
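For example, the "MAXERROR 100" case above would typically be supplied through the writer options; the "extracopyoptions" key is an assumption based on the connector's documented option names:

```scala
// df: the DataFrame to write; sfOptions: connection settings as in the earlier sketch.
df.write
  .format("net.snowflake.spark.snowflake")
  .options(sfOptions)
  .option("dbtable", "EVENTS")
  .option("extracopyoptions", "MAXERROR 100")  // appended to the generated COPY command
  .mode("append")
  .save()
```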
- def finalize(): Unit (protected[lang], AnyRef, @throws( classOf[java.lang.Throwable] ))
- final def getClass(): Class[_] (AnyRef → Any, @native())
- def getQueryIDUrl(queryID: String): String
- def getQueryResultFormat: Option[String]
  Snowflake query result format.
- def getTimeOutputFormat: Option[String]
  Snowflake time output format.
- def isExecuteQueryWithSyncMode: Boolean
- final def isInstanceOf[T0]: Boolean (Any)
- def isSslON: Boolean
- def isTimestampSnowflake(timestampFormat: String): Boolean
- def isTimezoneSnowflake: Boolean
- def isTimezoneSnowflakeDefault: Boolean
- def isTimezoneSpark: Boolean
- def keepOriginalColumnNameCase: Boolean
- def maxRetryCount: Int
- final def ne(arg0: AnyRef): Boolean (AnyRef)
- def nonProxyHosts: Option[String]
- final def notify(): Unit (AnyRef, @native())
- final def notifyAll(): Unit (AnyRef, @native())
- lazy val parallelism: Option[Int]
  Number of threads used for PUT/GET.
- val parameters: Map[String, String]
- def postActions: Array[String]
  List of semicolon-separated SQL statements to run after successful write operations. This can be useful for running GRANT operations to make your new tables readable to other users and groups. If the action string contains %s, the table name will be substituted in, in case a staging table is being used. Defaults to empty. (See the combined sketch after preActions below.)
- def preActions: Array[String]
  List of semicolon-separated SQL statements to run before write operations. This can be useful for running DELETE operations to clean up data. If the action string contains %s, the table name will be substituted in, in case a staging table is being used. Defaults to empty.
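A combined sketch for preActions and postActions. The lowercase "preactions" and "postactions" option keys and the surrounding SparkSession/DataFrame values are assumptions about how these parameters are supplied; %s substitution behaves as described above.

```scala
// df: the DataFrame to write; sfOptions: connection settings as in the earlier sketch.
df.write
  .format("net.snowflake.spark.snowflake")
  .options(sfOptions)
  .option("dbtable", "DAILY_METRICS")
  // Runs before the write; handy for cleanup (assumed option key).
  .option("preactions", "DELETE FROM DAILY_METRICS WHERE load_date = CURRENT_DATE();")
  // Runs after a successful write; %s is replaced with the (possibly staging) table name.
  .option("postactions", "GRANT SELECT ON %s TO ROLE reporting_role;")
  .mode("append")
  .save()
```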
- def privateKey: Option[PrivateKey]
  Generates a private key from the PEM key value.
  Returns: the private key object.
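A hedged sketch of supplying the key: the "pem_private_key" option key is an assumption, and the accessor then yields the parsed java.security.PrivateKey.

```scala
// Unencrypted private key in PEM format, read here from the environment for illustration.
val pemEncodedKey: String = sys.env("SNOWFLAKE_PEM_PRIVATE_KEY")

// MergedParameters as imported in the first sketch; "pem_private_key" is an assumed key.
val keyParams = MergedParameters(Map("pem_private_key" -> pemEncodedKey))
val maybeKey: Option[java.security.PrivateKey] = keyParams.privateKey
```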
- def proxyHost: Option[String]
- lazy val proxyInfo: Option[ProxyInfo]
- def proxyPassword: Option[String]
- def proxyPort: Option[String]
- def proxyUser: Option[String]
- def purge(): Boolean
  Whether or not to include PURGE in the COPY statement generated by the Spark connector.
- def query: Option[String]
  The Snowflake query to be used as the target when loading data.
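Either a table or a query may serve as the load target. A sketch using the assumed "query" option key:

```scala
// spark: an active SparkSession; sfOptions: connection settings as in the earlier sketch.
val topCustomers = spark.read
  .format("net.snowflake.spark.snowflake")
  .options(sfOptions)
  .option("query", "SELECT c_custkey, SUM(o_totalprice) AS total FROM orders GROUP BY c_custkey")
  .load()
```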
- def quoteJsonFieldName: Boolean
- lazy val rootTempDir: String
  A root directory to be used for intermediate data exchange, expected to be on cloud storage (S3 or Azure storage), or somewhere that can be written to and read from by Snowflake. Make sure that credentials are available for this cloud provider.
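The root directory is typically supplied via the "tempdir" option together with credentials Snowflake can use to reach it; the option key names here are assumptions that mirror the accessors listed above:

```scala
// Keys mirror the awsAccessKey / awsSecretKey accessors above (exact option names assumed).
val sfOptionsWithStage = sfOptions ++ Map(
  "tempdir"      -> "s3a://my-bucket/snowflake-staging/",
  "awsAccessKey" -> sys.env("AWS_ACCESS_KEY_ID"),
  "awsSecretKey" -> sys.env("AWS_SECRET_ACCESS_KEY")
)
```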
- lazy val rootTempDirStorageType: FSType
- def s3maxfilesize: String
  Maximum file size used to move data out of Snowflake.
- def setColumnMap(fromSchema: Option[StructType], toSchema: Option[StructType]): Unit
  Sets the column map.
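A sketch of deriving and then reading back a column mapping between a Spark schema and the target table schema; how the mapping is derived internally is assumed:

```scala
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

val sparkSchema = StructType(Seq(
  StructField("id", IntegerType),
  StructField("name", StringType)
))
val tableSchema = StructType(Seq(
  StructField("ID", IntegerType),
  StructField("NAME", StringType)
))

// params: a MergedParameters instance, e.g. as constructed in the first sketch.
params.setColumnMap(Some(sparkSchema), Some(tableSchema))
params.columnMap.foreach(mapping => println(s"column mapping: $mapping"))  // None if nothing was set
```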
- def sfAccount: Option[String]
  Snowflake account (optional).
- def sfAuthenticator: Option[String]
  Maps OAuth and authenticator values.
- def sfCompress: Boolean
  Snowflake compression on/off; "on" by default.
- def sfDatabase: String
  Snowflake database name.
- def sfExtraOptions: Map[String, AnyRef]
  Returns a map of options that are not known to the connector and are passed verbatim to the JDBC driver.
- def sfFullURL: String
  URL pointing to the Snowflake database, including the protocol, for example https://host:port.
- def sfPassword: String
  Snowflake password.
- def sfRole: Option[String]
  Snowflake role (optional).
- def sfSSL: String
  Snowflake SSL on/off; "on" by default.
- def sfSchema: String
  Snowflake schema.
- def sfTimestampLTZOutputFormat: Option[String]
- def sfTimestampNTZOutputFormat: Option[String]
- def sfTimestampTZOutputFormat: Option[String]
- def sfTimezone: Option[String]
  Snowflake timezone (optional).
- def sfToken: Option[String]
- def sfURL: String
  URL pointing to the Snowflake database, simply host:port.
- def sfUser: String
  Snowflake user.
- def sfWarehouse: Option[String]
  Snowflake warehouse.
- def skipWriteWhenWritingEmptyDataFrame: Boolean
- def stagingTableNameRemoveQuotesOnly: Boolean
- def storagePath: Option[String]
- def streamingStage: Option[String]
- final def synchronized[T0](arg0: ⇒ T0): T0 (AnyRef)
- def table: Option[TableName]
  The Snowflake table to be used as the target when loading or writing data.
- def temporaryAWSCredentials: Option[AWSCredentials]
  Temporary AWS credentials which are passed to Snowflake. These only need to be supplied by the user when Hadoop is configured to authenticate to S3 via IAM roles assigned to EC2 instances.
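Temporary credentials are normally passed as options; the key names below are assumptions based on the connector's documented option names:

```scala
// Session credentials obtained from STS, read here from the environment for illustration.
val optionsWithTempCreds = sfOptions ++ Map(
  "temporary_aws_access_key_id"     -> sys.env("AWS_ACCESS_KEY_ID"),
  "temporary_aws_secret_access_key" -> sys.env("AWS_SECRET_ACCESS_KEY"),
  "temporary_aws_session_token"     -> sys.env("AWS_SESSION_TOKEN")
)
```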
- def temporaryAzureStorageCredentials: Option[StorageCredentialsSharedAccessSignature]
  SAS token to be passed to Snowflake to access data in Azure storage. We currently don't support a full storage account key, so this has to be provided if the customer would like to load data through their storage account directly.
- def toString(): String (MergedParameters → AnyRef → Any)
- def truncateColumns(): Boolean
  Whether or not to include TRUNCATE_COLUMNS in the COPY statement generated by the Spark connector.
- def truncateTable: Boolean
  Truncates the table when overwriting, keeping the table schema.
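A sketch of an overwrite that truncates rather than drops the target, preserving the existing table schema; the "truncate_table" option key is an assumption:

```scala
import org.apache.spark.sql.SaveMode

// df: the DataFrame to write; sfOptions: connection settings as in the earlier sketch.
df.write
  .format("net.snowflake.spark.snowflake")
  .options(sfOptions)
  .option("dbtable", "DAILY_METRICS")
  .option("truncate_table", "on")   // assumed key: truncate instead of drop/recreate on overwrite
  .mode(SaveMode.Overwrite)
  .save()
```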
- def uploadChunkSize: Int
- def useAWSRegionURL: Boolean
- def useAwsMultiplePartsUpload: Boolean
- def useCopyUnload: Boolean
- def useExponentialBackoff: Boolean
- def useProxy: Boolean
  Whether to use a proxy (see the related proxy parameters).
- def useStagingTable: Boolean
  When true, data is always loaded into a new temporary table when performing an overwrite. This is to ensure that the whole load process succeeds before dropping any data from Snowflake, which can be useful if, in the event of failures, stale data is better than no data for your systems. Defaults to true.
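A small sketch of how the accessor reflects the option; the "usestagingtable" key, the "off" value, and the behavior with an empty map are assumptions consistent with the documented default of true:

```scala
// Default when the option is absent (assumed to follow the documented default of true).
println(MergedParameters(Map.empty).useStagingTable)                        // true

// Assumed key/value for disabling the staging table on overwrite.
println(MergedParameters(Map("usestagingtable" -> "off")).useStagingTable)  // false
```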
- lazy val usingExternalStage: Boolean
- final def wait(): Unit (AnyRef, @throws( ... ))
- final def wait(arg0: Long, arg1: Int): Unit (AnyRef, @throws( ... ))
- final def wait(arg0: Long): Unit (AnyRef, @throws( ... ) @native())