iceberg/writer/mod.rs
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Iceberg writer module.
//!
//! This module contains the generic writer traits and specific writer implementations. We categorize writers into two types:
//! 1. FileWriter: writes a physical file format (such as Parquet or ORC).
//! 2. IcebergWriter: writes a logical format provided by the Iceberg table (such as data files, equality delete files, and position delete files)
//!    or provides other functionality (such as partition writers and delta writers).
//!
//! An IcebergWriter uses an inner FileWriter to write the physical files.
//!
//! The writer interface is designed to be extensible and flexible. Writers can be independently configured
//! and composed to support complex write logic. E.g., by combining `FanoutWriter`, `DataFileWriter`, and `ParquetWriter`,
//! you can build a writer that automatically partitions the data and writes it in the Parquet format.
//!
//! For this purpose, there are four traits corresponding to these writers:
//! - IcebergWriterBuilder
//! - IcebergWriter
//! - FileWriterBuilder
//! - FileWriter
//!
//! Users can create specific writer builders, combine them, and build the final writer.
//! They can also define custom writers by implementing the `IcebergWriter` trait,
//! allowing seamless integration with existing writers. (See the example below.)
//!
//! # Simple example of a data file writer using the Parquet physical format:
//! ```rust, no_run
//! use std::collections::HashMap;
//! use std::sync::Arc;
//!
//! use arrow_array::{ArrayRef, BooleanArray, Int32Array, RecordBatch, StringArray};
//! use async_trait::async_trait;
//! use iceberg::io::{FileIO, FileIOBuilder};
//! use iceberg::spec::DataFile;
//! use iceberg::transaction::Transaction;
//! use iceberg::writer::base_writer::data_file_writer::DataFileWriterBuilder;
//! use iceberg::writer::file_writer::ParquetWriterBuilder;
//! use iceberg::writer::file_writer::location_generator::{
//!     DefaultFileNameGenerator, DefaultLocationGenerator,
//! };
//! use iceberg::writer::{IcebergWriter, IcebergWriterBuilder};
//! use iceberg::{Catalog, CatalogBuilder, MemoryCatalog, Result, TableIdent};
//! use parquet::file::properties::WriterProperties;
//!
//! #[tokio::main]
//! async fn main() -> Result<()> {
//!     // Connect to a catalog.
//!     use iceberg::memory::{MEMORY_CATALOG_WAREHOUSE, MemoryCatalogBuilder};
//!     use iceberg::writer::file_writer::rolling_writer::{
//!         RollingFileWriter, RollingFileWriterBuilder,
//!     };
//!     let catalog = MemoryCatalogBuilder::default()
//!         .load(
//!             "memory",
//!             HashMap::from([(
//!                 MEMORY_CATALOG_WAREHOUSE.to_string(),
//!                 "file:///path/to/warehouse".to_string(),
//!             )]),
//!         )
//!         .await?;
//!     // Add customized code to create a table first.
//!
//!     // Load the table from the catalog.
//!     let table = catalog
//!         .load_table(&TableIdent::from_strs(["hello", "world"])?)
//!         .await?;
//!     let location_generator = DefaultLocationGenerator::new(table.metadata().clone()).unwrap();
//!     let file_name_generator = DefaultFileNameGenerator::new(
//!         "test".to_string(),
//!         None,
//!         iceberg::spec::DataFileFormat::Parquet,
//!     );
//!
//!     // Create a parquet file writer builder. The parameters can be obtained from the table.
//!     let parquet_writer_builder = ParquetWriterBuilder::new(
//!         WriterProperties::default(),
//!         table.metadata().current_schema().clone(),
//!     );
//!
//!     // Create a rolling file writer builder using the parquet file writer builder.
//!     let rolling_file_writer_builder = RollingFileWriterBuilder::new_with_default_file_size(
//!         parquet_writer_builder,
//!         table.file_io().clone(),
//!         location_generator.clone(),
//!         file_name_generator.clone(),
//!     );
//!
//!     // Create a data file writer builder using the rolling file writer builder.
//!     let data_file_writer_builder = DataFileWriterBuilder::new(rolling_file_writer_builder);
//!     // Build the data file writer.
//!     let mut data_file_writer = data_file_writer_builder.build(None).await?;
//!
//!     // Write the data using data_file_writer...
//!
//!     // Close the writer and it will return the written data files.
//!     let data_files = data_file_writer.close().await.unwrap();
//!
//!     Ok(())
//! }
//! ```
//!
//! # Custom writer to record latency
//! ```rust, no_run
//! use std::collections::HashMap;
//! use std::time::Instant;
//!
//! use arrow_array::RecordBatch;
//! use iceberg::io::FileIOBuilder;
//! use iceberg::memory::MemoryCatalogBuilder;
//! use iceberg::spec::{DataFile, PartitionKey};
//! use iceberg::writer::base_writer::data_file_writer::DataFileWriterBuilder;
//! use iceberg::writer::file_writer::ParquetWriterBuilder;
//! use iceberg::writer::file_writer::location_generator::{
//!     DefaultFileNameGenerator, DefaultLocationGenerator,
//! };
//! use iceberg::writer::{IcebergWriter, IcebergWriterBuilder};
//! use iceberg::{Catalog, CatalogBuilder, MemoryCatalog, Result, TableIdent};
//! use parquet::file::properties::WriterProperties;
//!
//! #[derive(Clone)]
//! struct LatencyRecordWriterBuilder<B> {
//!     inner_writer_builder: B,
//! }
//!
//! impl<B: IcebergWriterBuilder> LatencyRecordWriterBuilder<B> {
//!     pub fn new(inner_writer_builder: B) -> Self {
//!         Self {
//!             inner_writer_builder,
//!         }
//!     }
//! }
//!
//! #[async_trait::async_trait]
//! impl<B: IcebergWriterBuilder> IcebergWriterBuilder for LatencyRecordWriterBuilder<B> {
//!     type R = LatencyRecordWriter<B::R>;
//!
//!     async fn build(&self, partition_key: Option<PartitionKey>) -> Result<Self::R> {
//!         Ok(LatencyRecordWriter {
//!             inner_writer: self.inner_writer_builder.build(partition_key).await?,
//!         })
//!     }
//! }
//!
//! struct LatencyRecordWriter<W> {
//!     inner_writer: W,
//! }
//!
//! #[async_trait::async_trait]
//! impl<W: IcebergWriter> IcebergWriter for LatencyRecordWriter<W> {
//!     async fn write(&mut self, input: RecordBatch) -> Result<()> {
//!         let start = Instant::now();
//!         self.inner_writer.write(input).await?;
//!         let _latency = start.elapsed();
//!         // record latency...
//!         Ok(())
//!     }
//!
//!     async fn close(&mut self) -> Result<Vec<DataFile>> {
//!         let start = Instant::now();
//!         let res = self.inner_writer.close().await?;
//!         let _latency = start.elapsed();
//!         // record latency...
//!         Ok(res)
//!     }
//! }
//!
//! #[tokio::main]
//! async fn main() -> Result<()> {
//!     // Connect to a catalog.
//!     use iceberg::memory::MEMORY_CATALOG_WAREHOUSE;
//!     use iceberg::spec::{Literal, PartitionKey, Struct};
//!     use iceberg::writer::file_writer::rolling_writer::{
//!         RollingFileWriter, RollingFileWriterBuilder,
//!     };
//!
//!     let catalog = MemoryCatalogBuilder::default()
//!         .load(
//!             "memory",
//!             HashMap::from([(
//!                 MEMORY_CATALOG_WAREHOUSE.to_string(),
//!                 "file:///path/to/warehouse".to_string(),
//!             )]),
//!         )
//!         .await?;
//!
//!     // Add customized code to create a table first.
//!
//!     // Load the table from the catalog.
//!     let table = catalog
//!         .load_table(&TableIdent::from_strs(["hello", "world"])?)
//!         .await?;
//!     let partition_key = PartitionKey::new(
//!         table.metadata().default_partition_spec().as_ref().clone(),
//!         table.metadata().current_schema().clone(),
//!         Struct::from_iter(vec![Some(Literal::string("Seattle"))]),
//!     );
//!     let location_generator = DefaultLocationGenerator::new(table.metadata().clone()).unwrap();
//!     let file_name_generator = DefaultFileNameGenerator::new(
//!         "test".to_string(),
//!         None,
//!         iceberg::spec::DataFileFormat::Parquet,
//!     );
//!
//!     // Create a parquet file writer builder. The parameters can be obtained from the table.
//!     let parquet_writer_builder = ParquetWriterBuilder::new(
//!         WriterProperties::default(),
//!         table.metadata().current_schema().clone(),
//!     );
//!
//!     // Create a rolling file writer builder with an explicit target file size.
//!     let rolling_file_writer_builder = RollingFileWriterBuilder::new(
//!         parquet_writer_builder,
//!         512 * 1024 * 1024,
//!         table.file_io().clone(),
//!         location_generator.clone(),
//!         file_name_generator.clone(),
//!     );
//!
//!     // Create a data file writer builder using the rolling file writer builder.
//!     let data_file_writer_builder = DataFileWriterBuilder::new(rolling_file_writer_builder);
//!     // Create a latency record writer builder using the data file writer builder.
//!     let latency_record_builder = LatencyRecordWriterBuilder::new(data_file_writer_builder);
//!     // Build the final writer.
//!     let mut latency_record_data_file_writer = latency_record_builder
//!         .build(Some(partition_key))
//!         .await
//!         .unwrap();
//!
//!     Ok(())
//! }
//! ```
//!
//! # Adding Partitioning to Data File Writers
//!
//! You can wrap a `DataFileWriter` with partitioning writers to handle partitioned tables.
//! Iceberg provides two partitioning strategies:
//!
//! ## FanoutWriter - For Unsorted Data
//!
//! Wraps the data file writer to handle unsorted data by maintaining multiple active writers.
//! Use this when your data is not pre-sorted by partition key. Writes to different partitions
//! can happen in any order, even interleaved.
//!
//! ```rust, no_run
//! # // Same setup as the simple example above...
//! # use iceberg::memory::{MEMORY_CATALOG_WAREHOUSE, MemoryCatalogBuilder};
//! # use iceberg::writer::file_writer::rolling_writer::RollingFileWriterBuilder;
//! # use iceberg::{Catalog, CatalogBuilder, Result, TableIdent};
//! # use iceberg::writer::base_writer::data_file_writer::DataFileWriterBuilder;
//! # use iceberg::writer::file_writer::ParquetWriterBuilder;
//! # use iceberg::writer::file_writer::location_generator::{
//! #     DefaultFileNameGenerator, DefaultLocationGenerator,
//! # };
//! # use parquet::file::properties::WriterProperties;
//! # use std::collections::HashMap;
//! # #[tokio::main]
//! # async fn main() -> Result<()> {
//! # let catalog = MemoryCatalogBuilder::default()
//! #     .load("memory", HashMap::from([(MEMORY_CATALOG_WAREHOUSE.to_string(), "file:///path/to/warehouse".to_string())]))
//! #     .await?;
//! # let table = catalog.load_table(&TableIdent::from_strs(["hello", "world"])?).await?;
//! # let location_generator = DefaultLocationGenerator::new(table.metadata().clone()).unwrap();
//! # let file_name_generator = DefaultFileNameGenerator::new("test".to_string(), None, iceberg::spec::DataFileFormat::Parquet);
//! # let parquet_writer_builder = ParquetWriterBuilder::new(WriterProperties::default(), table.metadata().current_schema().clone());
//! # let rolling_writer_builder = RollingFileWriterBuilder::new_with_default_file_size(
//! #     parquet_writer_builder, table.file_io().clone(), location_generator, file_name_generator);
//! # let data_file_writer_builder = DataFileWriterBuilder::new(rolling_writer_builder);
//!
//! // Wrap the data file writer with FanoutWriter for partitioning
//! use iceberg::writer::partitioning::fanout_writer::FanoutWriter;
//! use iceberg::writer::partitioning::PartitioningWriter;
//! use iceberg::spec::{Literal, PartitionKey, Struct};
//!
//! let mut fanout_writer = FanoutWriter::new(data_file_writer_builder);
//!
//! // Create partition keys for different regions
//! let schema = table.metadata().current_schema().clone();
//! let partition_spec = table.metadata().default_partition_spec().as_ref().clone();
//!
//! let partition_key_us = PartitionKey::new(
//!     partition_spec.clone(),
//!     schema.clone(),
//!     Struct::from_iter([Some(Literal::string("US"))]),
//! );
//!
//! let partition_key_eu = PartitionKey::new(
//!     partition_spec.clone(),
//!     schema.clone(),
//!     Struct::from_iter([Some(Literal::string("EU"))]),
//! );
//!
//! // Write to different partitions in any order - partition writes can interleave
//! // fanout_writer.write(partition_key_us.clone(), batch_us1).await?;
//! // fanout_writer.write(partition_key_eu.clone(), batch_eu1).await?;
//! // fanout_writer.write(partition_key_us.clone(), batch_us2).await?; // Back to US - OK!
//! // fanout_writer.write(partition_key_eu.clone(), batch_eu2).await?; // Back to EU - OK!
//!
//! let data_files = fanout_writer.close().await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## ClusteredWriter - For Sorted Data
//!
//! Wraps the data file writer for pre-sorted data. More memory efficient as it maintains
//! only one active writer at a time, but requires input sorted by partition key.
//!
//! ```rust, no_run
//! # // Same setup as the simple example above...
//! # use iceberg::memory::{MEMORY_CATALOG_WAREHOUSE, MemoryCatalogBuilder};
//! # use iceberg::writer::file_writer::rolling_writer::RollingFileWriterBuilder;
//! # use iceberg::{Catalog, CatalogBuilder, Result, TableIdent};
//! # use iceberg::writer::base_writer::data_file_writer::DataFileWriterBuilder;
//! # use iceberg::writer::file_writer::ParquetWriterBuilder;
//! # use iceberg::writer::file_writer::location_generator::{
//! #     DefaultFileNameGenerator, DefaultLocationGenerator,
//! # };
//! # use parquet::file::properties::WriterProperties;
//! # use std::collections::HashMap;
//! # #[tokio::main]
//! # async fn main() -> Result<()> {
//! # let catalog = MemoryCatalogBuilder::default()
//! #     .load("memory", HashMap::from([(MEMORY_CATALOG_WAREHOUSE.to_string(), "file:///path/to/warehouse".to_string())]))
//! #     .await?;
//! # let table = catalog.load_table(&TableIdent::from_strs(["hello", "world"])?).await?;
//! # let location_generator = DefaultLocationGenerator::new(table.metadata().clone()).unwrap();
//! # let file_name_generator = DefaultFileNameGenerator::new("test".to_string(), None, iceberg::spec::DataFileFormat::Parquet);
//! # let parquet_writer_builder = ParquetWriterBuilder::new(WriterProperties::default(), table.metadata().current_schema().clone());
//! # let rolling_writer_builder = RollingFileWriterBuilder::new_with_default_file_size(
//! #     parquet_writer_builder, table.file_io().clone(), location_generator, file_name_generator);
//! # let data_file_writer_builder = DataFileWriterBuilder::new(rolling_writer_builder);
//!
//! // Wrap the data file writer with ClusteredWriter for sorted partitioning
//! use iceberg::writer::partitioning::clustered_writer::ClusteredWriter;
//! use iceberg::writer::partitioning::PartitioningWriter;
//! use iceberg::spec::{Literal, PartitionKey, Struct};
//!
//! let mut clustered_writer = ClusteredWriter::new(data_file_writer_builder);
//!
//! // Create partition keys (must write in sorted order)
//! let schema = table.metadata().current_schema().clone();
//! let partition_spec = table.metadata().default_partition_spec().as_ref().clone();
//!
//! let partition_key_asia = PartitionKey::new(
//!     partition_spec.clone(),
//!     schema.clone(),
//!     Struct::from_iter([Some(Literal::string("ASIA"))]),
//! );
//!
//! let partition_key_eu = PartitionKey::new(
//!     partition_spec.clone(),
//!     schema.clone(),
//!     Struct::from_iter([Some(Literal::string("EU"))]),
//! );
//!
//! let partition_key_us = PartitionKey::new(
//!     partition_spec.clone(),
//!     schema.clone(),
//!     Struct::from_iter([Some(Literal::string("US"))]),
//! );
//!
//! // Write to partitions in sorted order (ASIA -> EU -> US)
//! // clustered_writer.write(partition_key_asia, batch_asia).await?;
//! // clustered_writer.write(partition_key_eu, batch_eu).await?;
//! // clustered_writer.write(partition_key_us, batch_us).await?;
//! // Writing back to ASIA would fail since data must be sorted!
//!
//! let data_files = clustered_writer.close().await?;
//!
//! # Ok(())
//! # }
//! ```

pub mod base_writer;
pub mod file_writer;
pub mod partitioning;

use arrow_array::RecordBatch;

use crate::Result;
use crate::spec::{DataFile, PartitionKey};

type DefaultInput = RecordBatch;
type DefaultOutput = Vec<DataFile>;

/// The builder for an Iceberg writer.
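///
/// A minimal usage sketch (the `writers_for` helper below is illustrative, not part of this
/// crate): a single builder can be reused to build one writer per partition key.
///
/// ```rust, no_run
/// use iceberg::Result;
/// use iceberg::spec::PartitionKey;
/// use iceberg::writer::IcebergWriterBuilder;
///
/// // Build one writer per partition from a single shared builder.
/// async fn writers_for<B: IcebergWriterBuilder>(
///     builder: &B,
///     partition_keys: Vec<PartitionKey>,
/// ) -> Result<Vec<B::R>> {
///     let mut writers = Vec::with_capacity(partition_keys.len());
///     for partition_key in partition_keys {
///         writers.push(builder.build(Some(partition_key)).await?);
///     }
///     Ok(writers)
/// }
/// ```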
#[async_trait::async_trait]
pub trait IcebergWriterBuilder<I = DefaultInput, O = DefaultOutput>: Send + Sync + 'static {
    /// The associated writer type.
    type R: IcebergWriter<I, O>;
    /// Build the iceberg writer with an optional partition key.
    async fn build(&self, partition_key: Option<PartitionKey>) -> Result<Self::R>;
}

/// The iceberg writer used to write data to an Iceberg table.
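///
/// A minimal sketch of the expected call pattern (the `write_all` helper below is illustrative,
/// not part of this crate): write record batches, then close the writer exactly once to collect
/// the produced data files.
///
/// ```rust, no_run
/// use arrow_array::RecordBatch;
/// use iceberg::Result;
/// use iceberg::spec::DataFile;
/// use iceberg::writer::IcebergWriter;
///
/// // Drive any `IcebergWriter`: feed it batches, then close it to get the data files back.
/// async fn write_all(
///     mut writer: impl IcebergWriter,
///     batches: Vec<RecordBatch>,
/// ) -> Result<Vec<DataFile>> {
///     for batch in batches {
///         writer.write(batch).await?;
///     }
///     // After `close` returns, the writer must not be used again.
///     writer.close().await
/// }
/// ```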
#[async_trait::async_trait]
pub trait IcebergWriter<I = DefaultInput, O = DefaultOutput>: Send + 'static {
    /// Write data to the iceberg table.
    async fn write(&mut self, input: I) -> Result<()>;
    /// Close the writer and return the written data files.
    /// If close fails, the data written so far may be lost, and the user may need to recreate the writer and rewrite the data.
    /// # NOTE
    /// After close returns, regardless of success or failure, the writer must not be used again, otherwise it will panic.
    async fn close(&mut self) -> Result<O>;
}

/// The current file status of the Iceberg writer.
/// This is implemented for writers that write a single file at a time.
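///
/// A minimal sketch of how a caller might consult this status, assuming an arbitrary
/// 512 MiB threshold (the `should_roll` helper below is illustrative, not part of this crate):
///
/// ```rust, no_run
/// use iceberg::writer::CurrentFileStatus;
///
/// // Decide whether the in-progress file is large enough to be rolled over to a new file.
/// fn should_roll(status: &impl CurrentFileStatus) -> bool {
///     status.current_written_size() >= 512 * 1024 * 1024
/// }
/// ```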
pub trait CurrentFileStatus {
    /// Get the current file path.
    fn current_file_path(&self) -> String;
    /// Get the current file row number.
    fn current_row_num(&self) -> usize;
    /// Get the current file written size.
    fn current_written_size(&self) -> usize;
}

#[cfg(test)]
mod tests {
    use arrow_array::RecordBatch;
    use arrow_schema::Schema;
    use arrow_select::concat::concat_batches;
    use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;

    use super::IcebergWriter;
    use crate::io::FileIO;
    use crate::spec::{DataFile, DataFileFormat};

    // This function is used to guarantee that the trait is object safe, i.e. usable as a trait object.
    async fn _guarantee_object_safe(mut w: Box<dyn IcebergWriter>) {
        let _ = w
            .write(RecordBatch::new_empty(Schema::empty().into()))
            .await;
        let _ = w.close().await;
    }

    // This function checks that:
    // 1. The data of the written parquet file is correct.
    // 2. The file format recorded in the data file metadata is consistent with the written parquet file.
    pub(crate) async fn check_parquet_data_file(
        file_io: &FileIO,
        data_file: &DataFile,
        batch: &RecordBatch,
    ) {
        assert_eq!(data_file.file_format, DataFileFormat::Parquet);

        let input_file = file_io.new_input(data_file.file_path.clone()).unwrap();
        // Read the written file back.
        let input_content = input_file.read().await.unwrap();
        let reader_builder =
            ParquetRecordBatchReaderBuilder::try_new(input_content.clone()).unwrap();

        // Check that the data matches the input batch.
        let reader = reader_builder.build().unwrap();
        let batches = reader.map(|batch| batch.unwrap()).collect::<Vec<_>>();
        let res = concat_batches(&batch.schema(), &batches).unwrap();
        assert_eq!(*batch, res);
    }
}