
Commit 6bb352c

Consolidate together Bevy's TaskPools
1 parent 9d420b4 commit 6bb352c

16 files changed: +123 -260 lines changed


crates/bevy_asset/src/processor/mod.rs (+4 -4)

@@ -18,7 +18,7 @@ use crate::{
 };
 use bevy_ecs::prelude::*;
 use bevy_log::{debug, error, trace, warn};
-use bevy_tasks::IoTaskPool;
+use bevy_tasks::ComputeTaskPool;
 use bevy_utils::{BoxedFuture, HashMap, HashSet};
 use futures_io::ErrorKind;
 use futures_lite::{AsyncReadExt, AsyncWriteExt, StreamExt};
@@ -165,7 +165,7 @@ impl AssetProcessor {
     pub fn process_assets(&self) {
         let start_time = std::time::Instant::now();
         debug!("Processing Assets");
-        IoTaskPool::get().scope(|scope| {
+        ComputeTaskPool::get().scope(|scope| {
            scope.spawn(async move {
                self.initialize().await.unwrap();
                for source in self.sources().iter_processed() {
@@ -315,7 +315,7 @@ impl AssetProcessor {
         #[cfg(any(target_arch = "wasm32", not(feature = "multi-threaded")))]
         error!("AddFolder event cannot be handled in single threaded mode (or WASM) yet.");
         #[cfg(all(not(target_arch = "wasm32"), feature = "multi-threaded"))]
-        IoTaskPool::get().scope(|scope| {
+        ComputeTaskPool::get().scope(|scope| {
            scope.spawn(async move {
                self.process_assets_internal(scope, source, path)
                    .await
@@ -457,7 +457,7 @@ impl AssetProcessor {
         loop {
             let mut check_reprocess_queue =
                 std::mem::take(&mut self.data.asset_infos.write().await.check_reprocess_queue);
-            IoTaskPool::get().scope(|scope| {
+            ComputeTaskPool::get().scope(|scope| {
                for path in check_reprocess_queue.drain(..) {
                    let processor = self.clone();
                    let source = self.get_source(path.source()).unwrap();
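The change in this file is mechanical: every `IoTaskPool::get().scope(...)` call site now targets `ComputeTaskPool` instead. For readers unfamiliar with the scoped API, here is a minimal, self-contained sketch of the pattern; the pool setup and the doubling work are illustrative stand-ins, not the processor's real futures.

```rust
use bevy_tasks::{ComputeTaskPool, TaskPoolBuilder};

fn main() {
    // Normally TaskPoolPlugin initializes the pool; done inline for a standalone sketch.
    ComputeTaskPool::get_or_init(|| TaskPoolBuilder::default().build());

    // scope() blocks the caller until every spawned future finishes and
    // collects their outputs, mirroring the processor's call sites above.
    let results: Vec<u32> = ComputeTaskPool::get().scope(|scope| {
        for i in 0..4u32 {
            scope.spawn(async move { i * 2 });
        }
    });

    assert_eq!(results.len(), 4);
}
```

Because `scope` only returns once all spawned futures complete, `process_assets` can stay a synchronous entry point even though the work inside is async.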

crates/bevy_asset/src/server/loaders.rs (+2 -2)

@@ -4,7 +4,7 @@ use crate::{
 };
 use async_broadcast::RecvError;
 use bevy_log::{error, warn};
-use bevy_tasks::IoTaskPool;
+use bevy_tasks::ComputeTaskPool;
 use bevy_utils::{HashMap, TypeIdMap};
 use std::{any::TypeId, sync::Arc};
 use thiserror::Error;
@@ -78,7 +78,7 @@ impl AssetLoaders {
         match maybe_loader {
             MaybeAssetLoader::Ready(_) => unreachable!(),
             MaybeAssetLoader::Pending { sender, .. } => {
-                IoTaskPool::get()
+                ComputeTaskPool::get()
                     .spawn(async move {
                         let _ = sender.broadcast(loader).await;
                     })

crates/bevy_asset/src/server/mod.rs (+5 -5)

@@ -19,7 +19,7 @@ use crate::{
 };
 use bevy_ecs::prelude::*;
 use bevy_log::{error, info};
-use bevy_tasks::IoTaskPool;
+use bevy_tasks::ComputeTaskPool;
 use bevy_utils::{CowArc, HashSet};
 use crossbeam_channel::{Receiver, Sender};
 use futures_lite::StreamExt;
@@ -296,7 +296,7 @@ impl AssetServer {
         if should_load {
             let owned_handle = Some(handle.clone().untyped());
             let server = self.clone();
-            IoTaskPool::get()
+            ComputeTaskPool::get()
                 .spawn(async move {
                     if let Err(err) = server.load_internal(owned_handle, path, false, None).await {
                         error!("{}", err);
@@ -366,7 +366,7 @@ impl AssetServer {
         let id = handle.id().untyped();

         let server = self.clone();
-        IoTaskPool::get()
+        ComputeTaskPool::get()
            .spawn(async move {
                let path_clone = path.clone();
                match server.load_untyped_async(path).await {
@@ -551,7 +551,7 @@ impl AssetServer {
     pub fn reload<'a>(&self, path: impl Into<AssetPath<'a>>) {
         let server = self.clone();
         let path = path.into().into_owned();
-        IoTaskPool::get()
+        ComputeTaskPool::get()
            .spawn(async move {
                let mut reloaded = false;

@@ -690,7 +690,7 @@ impl AssetServer {

         let path = path.into_owned();
         let server = self.clone();
-        IoTaskPool::get()
+        ComputeTaskPool::get()
            .spawn(async move {
                let Ok(source) = server.get_source(path.source()) else {
                    error!(
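The asset-server swaps follow a second pattern: a detached, fire-and-forget task rather than a blocking scope. A minimal sketch of that shape under the same assumptions; the channel is only there to observe completion (as in the bevy_core test below), and the async body is a placeholder for calls like `server.load_internal(...)`.

```rust
use bevy_tasks::{ComputeTaskPool, TaskPoolBuilder};

fn main() {
    ComputeTaskPool::get_or_init(|| TaskPoolBuilder::default().build());

    let (tx, rx) = crossbeam_channel::unbounded();

    // spawn() hands the future to the pool; detach() drops the Task handle
    // and lets the work run to completion in the background. This is the
    // fire-and-forget shape used for loads and reloads above.
    ComputeTaskPool::get()
        .spawn(async move {
            // Placeholder for work such as server.load_internal(...).await
            tx.send("loaded").unwrap();
        })
        .detach();

    assert_eq!(rx.recv().unwrap(), "loaded");
}
```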

crates/bevy_core/src/lib.rs (+2 -19)

@@ -112,8 +112,7 @@ fn register_math_types(app: &mut App) {
         .register_type::<Vec<bevy_math::Vec3>>();
 }

-/// Setup of default task pools: [`AsyncComputeTaskPool`](bevy_tasks::AsyncComputeTaskPool),
-/// [`ComputeTaskPool`](bevy_tasks::ComputeTaskPool), [`IoTaskPool`](bevy_tasks::IoTaskPool).
+/// Setup of default task pool: [`ComputeTaskPool`](bevy_tasks::ComputeTaskPool).
 #[derive(Default)]
 pub struct TaskPoolPlugin {
     /// Options for the [`TaskPool`](bevy_tasks::TaskPool) created at application start.
@@ -175,39 +174,23 @@ pub fn update_frame_count(mut frame_count: ResMut<FrameCount>) {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use bevy_tasks::prelude::{AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool};
+    use bevy_tasks::prelude::ComputeTaskPool;

     #[test]
     fn runs_spawn_local_tasks() {
         let mut app = App::new();
         app.add_plugins((TaskPoolPlugin::default(), TypeRegistrationPlugin));

-        let (async_tx, async_rx) = crossbeam_channel::unbounded();
-        AsyncComputeTaskPool::get()
-            .spawn_local(async move {
-                async_tx.send(()).unwrap();
-            })
-            .detach();
-
         let (compute_tx, compute_rx) = crossbeam_channel::unbounded();
         ComputeTaskPool::get()
             .spawn_local(async move {
                 compute_tx.send(()).unwrap();
             })
             .detach();

-        let (io_tx, io_rx) = crossbeam_channel::unbounded();
-        IoTaskPool::get()
-            .spawn_local(async move {
-                io_tx.send(()).unwrap();
-            })
-            .detach();
-
         app.run();

-        async_rx.try_recv().unwrap();
         compute_rx.try_recv().unwrap();
-        io_rx.try_recv().unwrap();
     }

     #[test]
+7 -110

@@ -1,35 +1,6 @@
-use bevy_tasks::{AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool, TaskPoolBuilder};
+use bevy_tasks::{ComputeTaskPool, TaskPoolBuilder};
 use bevy_utils::tracing::trace;

-/// Defines a simple way to determine how many threads to use given the number of remaining cores
-/// and number of total cores
-#[derive(Clone, Debug)]
-pub struct TaskPoolThreadAssignmentPolicy {
-    /// Force using at least this many threads
-    pub min_threads: usize,
-    /// Under no circumstance use more than this many threads for this pool
-    pub max_threads: usize,
-    /// Target using this percentage of total cores, clamped by min_threads and max_threads. It is
-    /// permitted to use 1.0 to try to use all remaining threads
-    pub percent: f32,
-}
-
-impl TaskPoolThreadAssignmentPolicy {
-    /// Determine the number of threads to use for this task pool
-    fn get_number_of_threads(&self, remaining_threads: usize, total_threads: usize) -> usize {
-        assert!(self.percent >= 0.0);
-        let mut desired = (total_threads as f32 * self.percent).round() as usize;
-
-        // Limit ourselves to the number of cores available
-        desired = desired.min(remaining_threads);
-
-        // Clamp by min_threads, max_threads. (This may result in us using more threads than are
-        // available, this is intended. An example case where this might happen is a device with
-        // <= 2 threads.
-        desired.clamp(self.min_threads, self.max_threads)
-    }
-}
-
 /// Helper for configuring and creating the default task pools. For end-users who want full control,
 /// set up [`TaskPoolPlugin`](super::TaskPoolPlugin)
 #[derive(Clone, Debug)]
@@ -40,13 +11,6 @@ pub struct TaskPoolOptions {
     /// If the number of physical cores is greater than max_total_threads, force using
     /// max_total_threads
     pub max_total_threads: usize,
-
-    /// Used to determine number of IO threads to allocate
-    pub io: TaskPoolThreadAssignmentPolicy,
-    /// Used to determine number of async compute threads to allocate
-    pub async_compute: TaskPoolThreadAssignmentPolicy,
-    /// Used to determine number of compute threads to allocate
-    pub compute: TaskPoolThreadAssignmentPolicy,
 }

 impl Default for TaskPoolOptions {
@@ -55,27 +19,6 @@ impl Default for TaskPoolOptions {
             // By default, use however many cores are available on the system
             min_total_threads: 1,
             max_total_threads: usize::MAX,
-
-            // Use 25% of cores for IO, at least 1, no more than 4
-            io: TaskPoolThreadAssignmentPolicy {
-                min_threads: 1,
-                max_threads: 4,
-                percent: 0.25,
-            },
-
-            // Use 25% of cores for async compute, at least 1, no more than 4
-            async_compute: TaskPoolThreadAssignmentPolicy {
-                min_threads: 1,
-                max_threads: 4,
-                percent: 0.25,
-            },
-
-            // Use all remaining cores for compute (at least 1)
-            compute: TaskPoolThreadAssignmentPolicy {
-                min_threads: 1,
-                max_threads: usize::MAX,
-                percent: 1.0, // This 1.0 here means "whatever is left over"
-            },
         }
     }
 }
@@ -96,57 +39,11 @@ impl TaskPoolOptions {
             .clamp(self.min_total_threads, self.max_total_threads);
         trace!("Assigning {} cores to default task pools", total_threads);

-        let mut remaining_threads = total_threads;
-
-        {
-            // Determine the number of IO threads we will use
-            let io_threads = self
-                .io
-                .get_number_of_threads(remaining_threads, total_threads);
-
-            trace!("IO Threads: {}", io_threads);
-            remaining_threads = remaining_threads.saturating_sub(io_threads);
-
-            IoTaskPool::get_or_init(|| {
-                TaskPoolBuilder::default()
-                    .num_threads(io_threads)
-                    .thread_name("IO Task Pool".to_string())
-                    .build()
-            });
-        }
-
-        {
-            // Determine the number of async compute threads we will use
-            let async_compute_threads = self
-                .async_compute
-                .get_number_of_threads(remaining_threads, total_threads);
-
-            trace!("Async Compute Threads: {}", async_compute_threads);
-            remaining_threads = remaining_threads.saturating_sub(async_compute_threads);
-
-            AsyncComputeTaskPool::get_or_init(|| {
-                TaskPoolBuilder::default()
-                    .num_threads(async_compute_threads)
-                    .thread_name("Async Compute Task Pool".to_string())
-                    .build()
-            });
-        }
-
-        {
-            // Determine the number of compute threads we will use
-            // This is intentionally last so that an end user can specify 1.0 as the percent
-            let compute_threads = self
-                .compute
-                .get_number_of_threads(remaining_threads, total_threads);
-
-            trace!("Compute Threads: {}", compute_threads);
-
-            ComputeTaskPool::get_or_init(|| {
-                TaskPoolBuilder::default()
-                    .num_threads(compute_threads)
-                    .thread_name("Compute Task Pool".to_string())
-                    .build()
-            });
-        }
+        ComputeTaskPool::get_or_init(|| {
+            TaskPoolBuilder::default()
+                .num_threads(total_threads)
+                .thread_name("Compute Task Pool".to_string())
+                .build()
+        });
     }
 }
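With the per-pool `TaskPoolThreadAssignmentPolicy` values gone, thread assignment reduces to clamping the detected core count into `[min_total_threads, max_total_threads]` and handing the whole budget to the single `ComputeTaskPool`. A compact sketch of that arithmetic; the struct and method names here are illustrative stand-ins (the enclosing function is not shown in the hunk above), and `available_parallelism` is assumed as the core-count source.

```rust
use bevy_tasks::{available_parallelism, ComputeTaskPool, TaskPoolBuilder};

/// Stripped-down stand-in for the remaining TaskPoolOptions fields.
struct PoolSizing {
    min_total_threads: usize,
    max_total_threads: usize,
}

impl PoolSizing {
    fn create_default_pool(&self) {
        // Every detected core, clamped into the configured bounds,
        // now goes to the one ComputeTaskPool.
        let total_threads =
            available_parallelism().clamp(self.min_total_threads, self.max_total_threads);

        ComputeTaskPool::get_or_init(|| {
            TaskPoolBuilder::default()
                .num_threads(total_threads)
                .thread_name("Compute Task Pool".to_string())
                .build()
        });
    }
}

fn main() {
    PoolSizing { min_total_threads: 1, max_total_threads: usize::MAX }.create_default_pool();
    // The pool is now available process-wide via ComputeTaskPool::get().
}
```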

crates/bevy_gltf/src/loader.rs (+2 -2)

@@ -34,7 +34,7 @@ use bevy_render::{
 };
 use bevy_scene::Scene;
 #[cfg(not(target_arch = "wasm32"))]
-use bevy_tasks::IoTaskPool;
+use bevy_tasks::ComputeTaskPool;
 use bevy_transform::components::Transform;
 use bevy_utils::{
     smallvec::{smallvec, SmallVec},
@@ -348,7 +348,7 @@ async fn load_gltf<'a, 'b, 'c>(
         }
     } else {
         #[cfg(not(target_arch = "wasm32"))]
-        IoTaskPool::get()
+        ComputeTaskPool::get()
            .scope(|scope| {
                gltf.textures().for_each(|gltf_texture| {
                    let parent_path = load_context.path().parent().unwrap();

crates/bevy_render/src/lib.rs (+1 -1)

@@ -306,7 +306,7 @@ impl Plugin for RenderPlugin {
         };
         // In wasm, spawn a task and detach it for execution
         #[cfg(target_arch = "wasm32")]
-        bevy_tasks::IoTaskPool::get()
+        bevy_tasks::ComputeTaskPool::get()
            .spawn_local(async_renderer)
            .detach();
         // Otherwise, just block for it to complete

crates/bevy_render/src/render_resource/pipeline_cache.rs (+7 -10)

@@ -12,7 +12,6 @@ use bevy_utils::{
 use naga::valid::Capabilities;
 use std::{
     borrow::Cow,
-    future::Future,
     hash::Hash,
     mem,
     ops::Deref,
@@ -697,8 +696,7 @@ impl PipelineCache {
         let device = self.device.clone();
         let shader_cache = self.shader_cache.clone();
         let layout_cache = self.layout_cache.clone();
-        create_pipeline_task(
-            async move {
+        create_pipeline_task(move || {
             let mut shader_cache = shader_cache.lock().unwrap();
             let mut layout_cache = layout_cache.lock().unwrap();

@@ -796,8 +794,7 @@ impl PipelineCache {
         let device = self.device.clone();
         let shader_cache = self.shader_cache.clone();
         let layout_cache = self.layout_cache.clone();
-        create_pipeline_task(
-            async move {
+        create_pipeline_task(move || {
             let mut shader_cache = shader_cache.lock().unwrap();
             let mut layout_cache = layout_cache.lock().unwrap();

@@ -953,14 +950,14 @@
     feature = "multi-threaded"
 ))]
 fn create_pipeline_task(
-    task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
+    task: impl FnOnce() -> Result<Pipeline, PipelineCacheError> + Send + 'static,
     sync: bool,
 ) -> CachedPipelineState {
     if !sync {
-        return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task));
+        return CachedPipelineState::Creating(bevy_tasks::ComputeTaskPool::get().spawn_blocking(task));
     }

-    match futures_lite::future::block_on(task) {
+    match task() {
         Ok(pipeline) => CachedPipelineState::Ok(pipeline),
         Err(err) => CachedPipelineState::Err(err),
     }
@@ -972,10 +969,10 @@
     not(feature = "multi-threaded")
 ))]
 fn create_pipeline_task(
-    task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
+    task: impl FnOnce() -> Result<Pipeline, PipelineCacheError> + Send + 'static,
     _sync: bool,
 ) -> CachedPipelineState {
-    match futures_lite::future::block_on(task) {
+    match task() {
         Ok(pipeline) => CachedPipelineState::Ok(pipeline),
         Err(err) => CachedPipelineState::Err(err),
     }
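Because pipeline creation is synchronous, blocking work, the task becomes an `FnOnce` closure instead of a future: the sync path simply calls it, and the async path hands it to `ComputeTaskPool::spawn_blocking`. A minimal sketch of that shape, assuming `spawn_blocking` returns an awaitable `Task` the way its use inside `CachedPipelineState::Creating` above implies; the factorial closure is a placeholder for real shader compilation, and the error type is arbitrary.

```rust
use bevy_tasks::{ComputeTaskPool, TaskPoolBuilder};

fn main() {
    // Normally TaskPoolPlugin initializes this; done inline for a standalone sketch.
    ComputeTaskPool::get_or_init(|| TaskPoolBuilder::default().build());

    // Blocking, CPU-bound work expressed as FnOnce() -> Result<_, _>,
    // standing in for pipeline compilation.
    let compile = || -> Result<u64, String> { Ok((1..=10u64).product()) };

    // spawn_blocking ships the closure to the pool; the returned task
    // resolves to the closure's return value once it has run.
    let task = ComputeTaskPool::get().spawn_blocking(compile);
    let result = futures_lite::future::block_on(task);
    assert_eq!(result, Ok(3_628_800));
}
```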
