public class SparkStreamingWrite extends java.lang.Object implements org.apache.spark.sql.connector.write.streaming.StreamingWrite
Nested Class Summary

Modifier and Type | Class and Description
---|---
static class | org.apache.iceberg.spark.source.SparkBatchWrite.TaskCommit
Method Summary

Modifier and Type | Method and Description
---|---
void | abort(long epochId, org.apache.spark.sql.connector.write.WriterCommitMessage[] messages)
void | abort(org.apache.spark.sql.connector.write.WriterCommitMessage[] messages)
void | commit(long epochId, org.apache.spark.sql.connector.write.WriterCommitMessage[] messages)
void | commit(org.apache.spark.sql.connector.write.WriterCommitMessage[] messages)
protected void | commitOperation(SnapshotUpdate<?> operation, int numFiles, java.lang.String description)
org.apache.iceberg.spark.source.SparkBatchWrite.WriterFactory | createBatchWriterFactory(org.apache.spark.sql.connector.write.PhysicalWriteInfo info)
org.apache.spark.sql.connector.write.streaming.StreamingDataWriterFactory | createStreamingWriterFactory(org.apache.spark.sql.connector.write.PhysicalWriteInfo info)
protected java.lang.Iterable&lt;DataFile&gt; | files(org.apache.spark.sql.connector.write.WriterCommitMessage[] messages)
protected FileFormat | getFileFormat(java.util.Map&lt;java.lang.String,java.lang.String&gt; tableProperties, java.util.Map&lt;java.lang.String,java.lang.String&gt; options)
protected Table | table()
java.lang.String | toString()
public org.apache.spark.sql.connector.write.streaming.StreamingDataWriterFactory createStreamingWriterFactory(org.apache.spark.sql.connector.write.PhysicalWriteInfo info)
Specified by: createStreamingWriterFactory in interface org.apache.spark.sql.connector.write.streaming.StreamingWrite
public void commit(long epochId, org.apache.spark.sql.connector.write.WriterCommitMessage[] messages)
Specified by: commit in interface org.apache.spark.sql.connector.write.streaming.StreamingWrite
public void abort(long epochId, org.apache.spark.sql.connector.write.WriterCommitMessage[] messages)
Specified by: abort in interface org.apache.spark.sql.connector.write.streaming.StreamingWrite
protected FileFormat getFileFormat(java.util.Map<java.lang.String,java.lang.String> tableProperties, java.util.Map<java.lang.String,java.lang.String> options)
public org.apache.iceberg.spark.source.SparkBatchWrite.WriterFactory createBatchWriterFactory(org.apache.spark.sql.connector.write.PhysicalWriteInfo info)
Specified by: createBatchWriterFactory in interface org.apache.spark.sql.connector.write.BatchWrite
public void commit(org.apache.spark.sql.connector.write.WriterCommitMessage[] messages)
Specified by: commit in interface org.apache.spark.sql.connector.write.BatchWrite
protected void commitOperation(SnapshotUpdate<?> operation, int numFiles, java.lang.String description)
public void abort(org.apache.spark.sql.connector.write.WriterCommitMessage[] messages)
Specified by: abort in interface org.apache.spark.sql.connector.write.BatchWrite
protected Table table()
protected java.lang.Iterable<DataFile> files(org.apache.spark.sql.connector.write.WriterCommitMessage[] messages)
public java.lang.String toString()
Overrides: toString in class java.lang.Object