Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions parquet/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -151,11 +151,6 @@ name = "write_parquet"
required-features = ["cli"]
path = "./examples/write_parquet.rs"

[[example]]
name = "async_read_parquet"
required-features = ["arrow", "async"]
path = "./examples/async_read_parquet.rs"

[[example]]
name = "read_with_rowgroup"
required-features = ["arrow", "async"]
Expand Down
69 changes: 0 additions & 69 deletions parquet/examples/async_read_parquet.rs

This file was deleted.

19 changes: 13 additions & 6 deletions parquet/src/arrow/arrow_reader/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -816,7 +816,7 @@ impl ArrowReaderMetadata {
/// Create [`ArrowReaderMetadata`] from the provided [`ArrowReaderOptions`]
/// and [`ChunkReader`]
///
/// See [`ParquetRecordBatchReaderBuilder::new_with_metadata`] for an
/// See [`ParquetRecordBatchReaderBuilder::new_with_metadata`] for an
/// example of how this can be used
///
/// # Notes
Expand All @@ -838,9 +838,7 @@ impl ArrowReaderMetadata {

/// Create a new [`ArrowReaderMetadata`] from a pre-existing
/// [`ParquetMetaData`] and [`ArrowReaderOptions`].
///
/// # Notes
///
/// This function will not attempt to load the PageIndex if not present in the metadata, regardless
/// of the settings in `options`. See [`Self::load`] to load metadata including the page index if needed.
pub fn try_new(metadata: Arc<ParquetMetaData>, options: ArrowReaderOptions) -> Result<Self> {
Expand Down Expand Up @@ -978,8 +976,6 @@ pub type ParquetRecordBatchReaderBuilder<T> = ArrowReaderBuilder<SyncReader<T>>;

impl<T: ChunkReader + 'static> ParquetRecordBatchReaderBuilder<T> {
/// Create a new [`ParquetRecordBatchReaderBuilder`]
///
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this change means the old example isn't run

/// ```
/// # use std::sync::Arc;
/// # use bytes::Bytes;
/// # use arrow_array::{Int32Array, RecordBatch};
Expand All @@ -1004,7 +1000,18 @@ impl<T: ChunkReader + 'static> ParquetRecordBatchReaderBuilder<T> {
///
/// // Read data
/// let _batch = reader.next().unwrap().unwrap();
/// ```
///
/// # Example
///
/// ```rust,no_run
/// use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
///
/// let file = std::fs::File::open("data.parquet")?;
/// let mut builder = ParquetRecordBatchReaderBuilder::try_new(file)?;
/// let mut reader = builder.build()?;
Copy link

Copilot AI Jan 14, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The example creates a reader but never demonstrates using it to read data. Consider adding code that actually reads from the reader (e.g., iterating over batches) to provide a more complete demonstration of the API's functionality.

Suggested change
/// let mut reader = builder.build()?;
/// let mut reader = builder.build()?;
///
/// // Read all record batches from the reader
/// while let Some(batch) = reader.next().transpose()? {
/// println!("Read {} rows", batch.num_rows());
/// }

Copilot uses AI. Check for mistakes.
///
/// while let Some(batch) = reader.next().transpose()? {
/// println!("Read {} rows", batch.num_rows());
/// }
/// # Ok::<(), parquet::errors::ParquetError>(())
/// ```
pub fn try_new(reader: T) -> Result<Self> {
    // Convenience constructor: delegates to `try_new_with_options` with
    // default `ArrowReaderOptions` (no page-index loading, default schema handling).
    Self::try_new_with_options(reader, Default::default())
}
Expand Down
Loading