+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::RecordBatch;
+use arrow::datatypes::{DataType, Field, Schema};
+use bytes::{BufMut, BytesMut};
+use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
+use datafusion::config::ConfigOptions;
+use datafusion::prelude::{ParquetReadOptions, SessionContext};
+use datafusion_execution::object_store::ObjectStoreUrl;
+use datafusion_physical_optimizer::push_down_filter::PushdownFilter;
+use datafusion_physical_optimizer::PhysicalOptimizerRule;
+use datafusion_physical_plan::ExecutionPlan;
+use object_store::memory::InMemory;
+use object_store::path::Path;
+use object_store::ObjectStore;
+use parquet::arrow::ArrowWriter;
+use std::sync::Arc;
+
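+/// Build a representative physical plan over a tiny in-memory Parquet file.
+/// Only plan construction and optimization are exercised by this benchmark;
+/// the query is never executed, so the file can stay empty.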
+async fn create_plan() -> Arc<dyn ExecutionPlan> {
+    let ctx = SessionContext::new();
+    let schema = Arc::new(Schema::new(vec![
+        Field::new("id", DataType::Int32, true),
+        Field::new("name", DataType::Utf8, true),
+        Field::new("age", DataType::UInt16, true),
+        Field::new("salary", DataType::Float64, true),
+    ]));
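+    // An empty batch is enough: the Parquet file only has to exist with the
+    // right schema for the scan to be planned.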
+    let batch = RecordBatch::new_empty(schema);
+
+    let store = Arc::new(InMemory::new()) as Arc<dyn ObjectStore>;
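+    // Encode the empty batch as Parquet bytes, entirely in memory.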
+    let data = {
+        let out = BytesMut::new();
+        let mut writer =
+            ArrowWriter::try_new(out.writer(), batch.schema(), None).unwrap();
+        writer.write(&batch).unwrap();
+        writer.finish().unwrap();
+        writer.into_inner().unwrap().into_inner().freeze()
+    };
+    store
+        .put(&Path::from("test.parquet"), data.into())
+        .await
+        .unwrap();
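+    // Register the in-memory store under the memory:// URL so the Parquet
+    // reader can resolve test.parquet.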
+    ctx.register_object_store(
+        ObjectStoreUrl::parse("memory://").unwrap().as_ref(),
+        store,
+    );
+
+    ctx.register_parquet("t", "memory://", ParquetReadOptions::default())
+        .await
+        .unwrap();
+
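+    // A query with filters, an aggregation, a join, a sort, and a limit gives
+    // the pushdown rule several operators to push filters through.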
+    let df = ctx
+        .sql(
+            r"
+        WITH brackets AS (
+            SELECT age % 10 AS age_bracket
+            FROM t
+            GROUP BY age % 10
+            HAVING COUNT(*) > 10
+        )
+        SELECT id, name, age, salary
+        FROM t
+        JOIN brackets ON t.age % 10 = brackets.age_bracket
+        WHERE age > 20 AND t.salary > 1000
+        ORDER BY t.salary DESC
+        LIMIT 100
+        ",
+        )
+        .await
+        .unwrap();
+
+    df.create_physical_plan().await.unwrap()
+}
+
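+// Criterion's `BenchmarkId` wants a `Display` parameter, so the plan and the
+// config options are wrapped in a small type with a fixed name.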
+#[derive(Clone)]
+struct BenchmarkPlan {
+    plan: Arc<dyn ExecutionPlan>,
+    config: ConfigOptions,
+}
+
+impl std::fmt::Display for BenchmarkPlan {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "BenchmarkPlan")
+    }
+}
+
+fn bench_push_down_filter(c: &mut Criterion) {
+    // Create a relatively complex plan
+    let plan = tokio::runtime::Runtime::new()
+        .unwrap()
+        .block_on(create_plan());
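+    // Enable Parquet filter pushdown so the optimizer rule has work to do.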
+    let mut config = ConfigOptions::default();
+    config.execution.parquet.pushdown_filters = true;
+    let plan = BenchmarkPlan { plan, config };
+
+    c.bench_with_input(
+        BenchmarkId::new("push_down_filter", plan.clone()),
+        &plan,
+        |b, plan| {
+            b.iter(|| {
+                let optimizer = PushdownFilter::new();
+                optimizer
+                    .optimize(Arc::clone(&plan.plan), &plan.config)
+                    .unwrap();
+            });
+        },
+    );
+}
+
+criterion_group!(benches, bench_push_down_filter);
+criterion_main!(benches);