From 324da1326ce5cc99c0c434cec997f9fef8288935 Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Sun, 30 Aug 2020 20:48:18 -0400 Subject: [PATCH 01/12] More beginner-friendly TCP server example --- examples/01_05_http_server/Cargo.toml | 18 --- examples/01_05_http_server/src/lib.rs | 91 ----------- examples/08_01_sync_tcp_server/404.html | 11 ++ examples/08_01_sync_tcp_server/Cargo.toml | 9 ++ examples/08_01_sync_tcp_server/hello.html | 11 ++ examples/08_01_sync_tcp_server/src/main.rs | 39 +++++ examples/08_02_async_tcp_server/Cargo.toml | 10 ++ examples/08_02_async_tcp_server/src/main.rs | 23 +++ .../08_03_concurrent_tcp_server/Cargo.toml | 10 ++ .../08_03_concurrent_tcp_server/src/main.rs | 20 +++ .../08_04_nonblocking_tcp_server/Cargo.toml | 10 ++ .../08_04_nonblocking_tcp_server/src/main.rs | 28 ++++ examples/08_05_final_tcp_server/Cargo.toml | 11 ++ examples/08_05_final_tcp_server/src/main.rs | 144 ++++++++++++++++++ examples/Cargo.toml | 6 +- .../04_async_await_primer.md | 3 - .../05_http_server_example.md | 78 ---------- src/03_async_await/01_chapter.md | 4 +- src/08_example/00_intro.md | 23 +++ src/08_example/01_running_async_code.md | 51 +++++++ .../02_handling_connections_concurrently.md | 34 +++++ src/08_example/03_combinators.md | 34 +++++ src/08_example/04_tests.md | 50 ++++++ src/SUMMARY.md | 6 +- 24 files changed, 530 insertions(+), 194 deletions(-) delete mode 100644 examples/01_05_http_server/Cargo.toml delete mode 100644 examples/01_05_http_server/src/lib.rs create mode 100644 examples/08_01_sync_tcp_server/404.html create mode 100644 examples/08_01_sync_tcp_server/Cargo.toml create mode 100644 examples/08_01_sync_tcp_server/hello.html create mode 100644 examples/08_01_sync_tcp_server/src/main.rs create mode 100644 examples/08_02_async_tcp_server/Cargo.toml create mode 100644 examples/08_02_async_tcp_server/src/main.rs create mode 100644 examples/08_03_concurrent_tcp_server/Cargo.toml create mode 100644 examples/08_03_concurrent_tcp_server/src/main.rs create mode 100644 examples/08_04_nonblocking_tcp_server/Cargo.toml create mode 100644 examples/08_04_nonblocking_tcp_server/src/main.rs create mode 100644 examples/08_05_final_tcp_server/Cargo.toml create mode 100644 examples/08_05_final_tcp_server/src/main.rs delete mode 100644 src/01_getting_started/05_http_server_example.md create mode 100644 src/08_example/00_intro.md create mode 100644 src/08_example/01_running_async_code.md create mode 100644 src/08_example/02_handling_connections_concurrently.md create mode 100644 src/08_example/03_combinators.md create mode 100644 src/08_example/04_tests.md diff --git a/examples/01_05_http_server/Cargo.toml b/examples/01_05_http_server/Cargo.toml deleted file mode 100644 index 7c1d7f0e..00000000 --- a/examples/01_05_http_server/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "example_01_05_http_server" -version = "0.2.0" -authors = ["Taylor Cramer "] -edition = "2018" - -[lib] - -[dependencies] -# Hyper is an asynchronous HTTP library. We'll use it to power our HTTP -# server and to make HTTP requests. -hyper = "0.13" -# To setup some sort of runtime needed by Hyper, we will use the Tokio runtime. 
-tokio = { version = "0.2", features = ["full"] } - -# (only for testing) -anyhow = "1.0.31" -reqwest = { version = "0.10.4", features = ["blocking"] } diff --git a/examples/01_05_http_server/src/lib.rs b/examples/01_05_http_server/src/lib.rs deleted file mode 100644 index c3a9a949..00000000 --- a/examples/01_05_http_server/src/lib.rs +++ /dev/null @@ -1,91 +0,0 @@ -#![cfg(test)] - -// ANCHOR: imports -use { - hyper::{ - // Following functions are used by Hyper to handle a `Request` - // and returning a `Response` in an asynchronous manner by using a Future - service::{make_service_fn, service_fn}, - // Miscellaneous types from Hyper for working with HTTP. - Body, - Client, - Request, - Response, - Server, - Uri, - }, - std::net::SocketAddr, -}; -// ANCHOR_END: imports - -// ANCHOR: boilerplate -async fn serve_req(_req: Request) -> Result, hyper::Error> { - // Always return successfully with a response containing a body with - // a friendly greeting ;) - Ok(Response::new(Body::from("hello, world!"))) -} - -async fn run_server(addr: SocketAddr) { - println!("Listening on http://{}", addr); - - // Create a server bound on the provided address - let serve_future = Server::bind(&addr) - // Serve requests using our `async serve_req` function. - // `serve` takes a type which implements the `MakeService` trait. - // `make_service_fn` converts a closure into a type which - // implements the `MakeService` trait. That closure must return a - // type that implements the `Service` trait, and `service_fn` - // converts a request-response function into a type that implements - // the `Service` trait. - .serve(make_service_fn(|_| async { - Ok::<_, hyper::Error>(service_fn(serve_req)) - })); - - // Wait for the server to complete serving or exit with an error. - // If an error occurred, print it to stderr. - if let Err(e) = serve_future.await { - eprintln!("server error: {}", e); - } -} - -#[tokio::main] -async fn main() { - // Set the address to run our socket on. - let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); - - // Call our `run_server` function, which returns a future. - // As with every `async fn`, for `run_server` to do anything, - // the returned future needs to be run using `await`; - run_server(addr).await; -} -// ANCHOR_END: boilerplate - -#[test] -fn run_main_and_query_http() -> Result<(), anyhow::Error> { - std::thread::spawn(main); - // Unfortunately, there's no good way for us to detect when the server - // has come up, so we sleep for an amount that should hopefully be - // sufficient :( - std::thread::sleep(std::time::Duration::from_secs(5)); - let response = reqwest::blocking::get("http://localhost:3000")?.text()?; - assert_eq!(response, "hello, world!"); - Ok(()) -} - -mod proxy { - use super::*; - #[allow(unused)] - async fn serve_req(_req: Request) -> Result, hyper::Error> { - // ANCHOR: parse_url - let url_str = "http://www.rust-lang.org/en-US/"; - let url = url_str.parse::().expect("failed to parse URL"); - // ANCHOR_END: parse_url - - // ANCHOR: get_request - let res = Client::new().get(url).await?; - // Return the result of the request directly to the user - println!("request finished-- returning response"); - Ok(res) - // ANCHOR_END: get_request - } -} diff --git a/examples/08_01_sync_tcp_server/404.html b/examples/08_01_sync_tcp_server/404.html new file mode 100644 index 00000000..88d8e915 --- /dev/null +++ b/examples/08_01_sync_tcp_server/404.html @@ -0,0 +1,11 @@ + + + + + Hello! + + +

+    <h1>Oops!</h1>
+    <p>Sorry, I don't know what you're asking for.</p>

+ + diff --git a/examples/08_01_sync_tcp_server/Cargo.toml b/examples/08_01_sync_tcp_server/Cargo.toml new file mode 100644 index 00000000..3048a1e7 --- /dev/null +++ b/examples/08_01_sync_tcp_server/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "sync_tcp_server" +version = "0.1.0" +authors = ["Your Name + + + + Hello! + + +

+    <h1>Hello!</h1>
+    <p>Hi from Rust</p>

+ + diff --git a/examples/08_01_sync_tcp_server/src/main.rs b/examples/08_01_sync_tcp_server/src/main.rs new file mode 100644 index 00000000..8af6b4a1 --- /dev/null +++ b/examples/08_01_sync_tcp_server/src/main.rs @@ -0,0 +1,39 @@ +use std::fs; +use std::io::prelude::*; +use std::net::TcpListener; +use std::net::TcpStream; + +fn main() { + // Listen for incoming TCP connections on localhost port 7878 + let listener = TcpListener::bind("127.0.0.1:7878").unwrap(); + + // Block forever, handling each request that arrives at this IP address + for stream in listener.incoming() { + let stream = stream.unwrap(); + + handle_connection(stream); + } +} + +fn handle_connection(mut stream: TcpStream) { + // Read the first 1024 bytes of data from the stream + let mut buffer = [0; 1024]; + stream.read(&mut buffer).unwrap(); + + let get = b"GET / HTTP/1.1\r\n"; + + // Respond with greetings or a 404, + // depending on the data in the request + let (status_line, filename) = if buffer.starts_with(get) { + ("HTTP/1.1 200 OK\r\n\r\n", "hello.html") + } else { + ("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html") + }; + let contents = fs::read_to_string(filename).unwrap(); + + // Write response back to the stream, + // and flush the stream to ensure the response is sent back to the client + let response = format!("{}{}", status_line, contents); + stream.write(response.as_bytes()).unwrap(); + stream.flush().unwrap(); +} \ No newline at end of file diff --git a/examples/08_02_async_tcp_server/Cargo.toml b/examples/08_02_async_tcp_server/Cargo.toml new file mode 100644 index 00000000..d471d262 --- /dev/null +++ b/examples/08_02_async_tcp_server/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "async_tcp_server" +version = "0.1.0" +authors = ["Your Name +} +// ANCHOR_END: handle_connection_async diff --git a/examples/08_03_concurrent_tcp_server/Cargo.toml b/examples/08_03_concurrent_tcp_server/Cargo.toml new file mode 100644 index 00000000..07b882f5 --- /dev/null +++ b/examples/08_03_concurrent_tcp_server/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "concurrent_tcp_server" +version = "0.1.0" +authors = ["Your Name +} diff --git a/examples/08_04_nonblocking_tcp_server/Cargo.toml b/examples/08_04_nonblocking_tcp_server/Cargo.toml new file mode 100644 index 00000000..276c4d92 --- /dev/null +++ b/examples/08_04_nonblocking_tcp_server/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "nonblocking_tcp_server" +version = "0.1.0" +authors = ["Your Name + stream.write(response.as_bytes()).await.unwrap(); + stream.flush().await.unwrap(); +} +// ANCHOR_END: handle_connection diff --git a/examples/08_05_final_tcp_server/Cargo.toml b/examples/08_05_final_tcp_server/Cargo.toml new file mode 100644 index 00000000..7b1e8e00 --- /dev/null +++ b/examples/08_05_final_tcp_server/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "final_tcp_server" +version = "0.1.0" +authors = ["Your Name +} +// ANCHOR_END: handle_connection + +// ANCHOR: slow_functions +use async_std::task::sleep; + +async fn write_to_database() { + // Simulate a slow request + sleep(Duration::from_secs(2)).await; +} + +async fn add_to_queue() { + // Simulate a slow request + sleep(Duration::from_secs(1)).await; +} +// ANCHOR_END: slow_functions + +async fn foo() { + // ANCHOR: serial_execution + let now = Instant::now(); + write_to_database().await; + add_to_queue().await; + println!( + "Write to database + add to queue took {} seconds", + now.elapsed().as_secs() + ); + // ANCHOR_END: serial_execution +} + +async fn bar() { + // ANCHOR: parallel_execution + let now = 
Instant::now(); + join!(write_to_database(), add_to_queue()); + println!( + "Write to database + add to queue took {} seconds", + now.elapsed().as_secs() + ); + // ANCHOR_END: parallel_execution + +} + +#[cfg(test)] + +mod tests { + // ANCHOR: mock_read + use super::*; + use futures::io::Error; + use futures::task::{Context, Poll}; + + use std::cmp::min; + use std::pin::Pin; + + struct MockTcpStream { + read_data: Vec, + write_data: Vec, + } + + impl Read for MockTcpStream { + fn poll_read( + self: Pin<&mut Self>, + _: &mut Context, + buf: &mut [u8], + ) -> Poll> { + let size: usize = min(self.read_data.len(), buf.len()); + buf.copy_from_slice(&self.read_data[..size]); + Poll::Ready(Ok(size)) + } + } + // ANCHOR_END: mock_read + + // ANCHOR: mock_write + impl Write for MockTcpStream { + fn poll_write( + mut self: Pin<&mut Self>, + _: &mut Context, + buf: &[u8], + ) -> Poll> { + self.write_data = Vec::from(buf); + return Poll::Ready(Ok(buf.len())); + } + fn poll_flush(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + } + // ANCHOR_END: mock_write + + // ANCHOR: unpin + use std::marker::Unpin; + impl Unpin for MockTcpStream {} + // ANCHOR_END: unpin + + // ANCHOR: test + use std::fs; + + #[test] + fn test_handle_connection() { + let input_bytes = b"GET / HTTP/1.1\r\n"; + let mut contents = vec![0u8; 1024]; + contents[..input_bytes.len()].clone_from_slice(input_bytes); + let mut stream = MockTcpStream { + read_data: contents, + write_data: Vec::new(), + }; + + block_on(async { + handle_connection(&mut stream).await; + let mut buf = [0u8; 1024]; + stream.read(&mut buf).await.unwrap(); + }); + + let expected_contents = fs::read_to_string("hello.html").unwrap(); + let expected_response = format!("HTTP/1.1 200 OK\r\n\r\n{}", expected_contents); + assert!(stream.write_data.starts_with(expected_response.as_bytes())); + } + // ANCHOR_END: test +} + diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b89b3115..582345b7 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -2,7 +2,6 @@ members = [ "01_02_why_async", "01_04_async_await_primer", - "01_05_http_server", "02_02_future_trait", "02_03_timer", "02_04_executor", @@ -12,4 +11,9 @@ members = [ "06_02_join", "06_03_select", "07_05_recursion", + "08_01_sync_tcp_server", + "08_02_async_tcp_server", + "08_03_concurrent_tcp_server", + "08_04_nonblocking_tcp_server", + "08_05_final_tcp_server", ] diff --git a/src/01_getting_started/04_async_await_primer.md b/src/01_getting_started/04_async_await_primer.md index 5a887746..c2eaeff0 100644 --- a/src/01_getting_started/04_async_await_primer.md +++ b/src/01_getting_started/04_async_await_primer.md @@ -66,6 +66,3 @@ This would make it impossible to dance at the same time. By `.await`-ing the `learn_song` future, we allow other tasks to take over the current thread if `learn_song` is blocked. This makes it possible to run multiple futures to completion concurrently on the same thread. - -Now that you've learned the basics of `async`/`await`, let's try out an -example. diff --git a/src/01_getting_started/05_http_server_example.md b/src/01_getting_started/05_http_server_example.md deleted file mode 100644 index 521012f2..00000000 --- a/src/01_getting_started/05_http_server_example.md +++ /dev/null @@ -1,78 +0,0 @@ -# Applied: Simple HTTP Server - -Let's use `async`/`.await` to build an echo server! 
- -To start, run `rustup update stable` to make sure you've got stable Rust 1.39 or newer. Once you've done that, run -`cargo new async-await-echo` to create a new project, and open up -the resulting `async-await-echo` folder. - -Let's add some dependencies to the `Cargo.toml` file: - -```toml -{{#include ../../examples/01_05_http_server/Cargo.toml:9:18}} -``` - -Now that we've got our dependencies out of the way, let's start writing some -code. We have some imports to add: - -```rust,ignore -{{#include ../../examples/01_05_http_server/src/lib.rs:imports}} -``` - -Once the imports are out of the way, we can start putting together the -boilerplate to allow us to serve requests: - -```rust,ignore -{{#include ../../examples/01_05_http_server/src/lib.rs:boilerplate}} -``` - -If you `cargo run` now, you should see the message "Listening on -http://127.0.0.1:3000" printed on your terminal. If you open that URL in your -browser of choice, you'll see "hello, world!" appear in your browser. -Congratulations! You just wrote your first asynchronous webserver in Rust. - -You can also inspect the request itself, which contains information such as -the request URI, HTTP version, headers, and other metadata. For example, we -can print out the URI of the request like this: - -```rust,ignore -println!("Got request at {:?}", _req.uri()); -``` - -You may have noticed that we're not yet doing -anything asynchronous when handling the request-- we just respond immediately, -so we're not taking advantage of the flexibility that `async fn` gives us. -Rather than just returning a static message, let's try proxying the user's -request to another website using Hyper's HTTP client. - -We start by parsing out the URL we want to request: - -```rust,ignore -{{#include ../../examples/01_05_http_server/src/lib.rs:parse_url}} -``` - -Then we can create a new `hyper::Client` and use it to make a `GET` request, -returning the response to the user: - -```rust,ignore -{{#include ../../examples/01_05_http_server/src/lib.rs:get_request}} -``` - -`Client::get` returns a `hyper::client::ResponseFuture`, which implements -`Future>>` -(or `Future, Error = Error>` in futures 0.1 terms). -When we `.await` that future, an HTTP request is sent out, the current task -is suspended, and the task is queued to be continued once a response has -become available. - -Now, if you `cargo run` and open `http://127.0.0.1:3000/foo` in your browser, -you'll see the Rust homepage, and the following terminal output: - -``` -Listening on http://127.0.0.1:3000 -Got request at /foo -making request to http://www.rust-lang.org/en-US/ -request finished-- returning response -``` - -Congratulations! You just proxied an HTTP request. diff --git a/src/03_async_await/01_chapter.md b/src/03_async_await/01_chapter.md index 3d4d72f5..c4f6cfe7 100644 --- a/src/03_async_await/01_chapter.md +++ b/src/03_async_await/01_chapter.md @@ -1,7 +1,7 @@ # `async`/`.await` -In [the first chapter], we took a brief look at `async`/`.await` and used -it to build a simple server. This chapter will discuss `async`/`.await` in +In [the first chapter], we took a brief look at `async`/`.await`. +This chapter will discuss `async`/`.await` in greater detail, explaining how it works and how `async` code differs from traditional Rust programs. 
diff --git a/src/08_example/00_intro.md b/src/08_example/00_intro.md new file mode 100644 index 00000000..142b9a45 --- /dev/null +++ b/src/08_example/00_intro.md @@ -0,0 +1,23 @@ +# Applied: Building a Concurrent Web Server with Async Rust +In this chapter, we'll use asynchronous Rust to modify the Rust book's +[single-threaded web server](https://doc.rust-lang.org/book/ch20-01-single-threaded.html) +to serve requests concurrently. +Here's what the code looked like at the end of the lesson. + +`src/main.rs`: +```rust +{{#include ../../examples/08_01_sync_tcp_server/src/main.rs}} +``` + +`hello.html`: +```html +{{#include ../../examples/08_01_sync_tcp_server/hello.html}} +``` + +`404.html`: +```html +{{#include ../../examples/08_01_sync_tcp_server/404.html}} +``` + +If you run the server with `cargo run` and visit `127.0.0.1:7878` in your browser, +you'll be greeted with a friendly message from Ferris! \ No newline at end of file diff --git a/src/08_example/01_running_async_code.md b/src/08_example/01_running_async_code.md new file mode 100644 index 00000000..b67944f4 --- /dev/null +++ b/src/08_example/01_running_async_code.md @@ -0,0 +1,51 @@ +# Running Asynchronous Code +As [the book explains](https://doc.rust-lang.org/book/ch20-02-multithreaded.html#turning-our-single-threaded-server-into-a-multithreaded-server), +we don't want our web server to wait for each request to finish before handling the next, +as some requests could be very slow. +Instead of improving throughput by adding threads, +we'll use asynchronous code to process requests concurrently. + +Let's modify `handle_connection` to return a future by declaring it an `async fn`: +```rust,ignore +{{#include ../../examples/08_02_async_tcp_server/src/main.rs:handle_connection_async}} +``` + +Adding `async` to the function declaration changes its return type +from the unit type `()` to a type that implements `Future`. + +If we try to compile this, the compiler warns us that it will not work: +```console +$ cargo check + Checking async-rust v0.1.0 (file:///projects/async-rust) +warning: unused implementer of `std::future::Future` that must be used + --> src/main.rs:12:9 + | +12 | handle_connection(stream); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `#[warn(unused_must_use)]` on by default + = note: futures do nothing unless you `.await` or poll them +``` + +Because we haven't `await`ed or `poll`ed the result of `handle_connection`, +it'll never run. If you run the server and visit `127.0.0.1:7878` in a browser, +you'll see that the connection is refused; our server is not handling requests. + +We can't `await` or `poll` futures within synchronous code by itself. +We'll need an executor to handle scheduling and running futures to completion. +Please consult the section [Choosing an Executor](../404.md) for more information on executors. +Here, we'll use the `block_on` executor from the `async_std` crate. + +It might be tempting to write something like this: +```rust +{{#include ../../examples/08_02_async_tcp_server/src/main.rs:main_func}} +``` + +However, just because this program uses an asynchronous connection handler +doesn't mean that it handles connections concurrently. +To illustrate this, try out the +[simulation of a slow request](https://doc.rust-lang.org/book/ch20-02-multithreaded.html#simulating-a-slow-request-in-the-current-server-implementation) +from the Book. You'll see that one slow request will block any other incoming requests! 
+This is because there are no other concurrent tasks that can make progress +while we are `await`ing the result of `handle_connection`. +We'll see how to avoid this in the next section. \ No newline at end of file diff --git a/src/08_example/02_handling_connections_concurrently.md b/src/08_example/02_handling_connections_concurrently.md new file mode 100644 index 00000000..9e017223 --- /dev/null +++ b/src/08_example/02_handling_connections_concurrently.md @@ -0,0 +1,34 @@ +# Handling Connections Concurrently + +The problem with our code so far is that `listener.incoming()` is a blocking iterator; +we can't read a new request from this stream until we're done with the previous one. +One strategy to work around this is to spawn a new Task to handle each connection in the background: +```rust +{{#include ../../examples/08_03_concurrent_tcp_server/src/main.rs:main_func}} +``` + +This works because under the hood, the `async_std` executor runs `handle_connection` on a separate thread. +However, this doesn't completely solve our problem: `listener.incoming()` still blocks the executor. +Even if connections are handled in separate threads, futures running on the main thread +are blocked while `listener` waits on incoming connections. + +In order to fix this, we can replace our blocking `std::net::TcpListener` with the non-blocking `async_std::net::TcpListener`. + +This change prevents `listener.incoming()` from blocking the executor +by allowing us to `await` the next TCP connection on this port. +Now, the executor can yield to other futures running on the main thread +while there are no incoming TCP connections to be processed. +(Note that this change still does *not* allow `listener.incoming()` to emit items concurrently. +We still need to process a stream or spawn a task to handle it before moving on to the next one.) + +Let's update our example to make use of the asynchronous `TcpListener`. +First, we'll need to update our code to `await` the next incoming connection, +rather than iterating over `listener.incoming()`: +```rust +{{#include ../../examples/08_04_nonblocking_tcp_server/src/main.rs:main_func}} +``` + +Lastly, we'll have to update our connection handler to accept an `async_std::net::TcpStream`: +```rust,ignore +{{#include ../../examples/08_04_nonblocking_tcp_server/src/main.rs:handle_connection}} +``` diff --git a/src/08_example/03_combinators.md b/src/08_example/03_combinators.md new file mode 100644 index 00000000..81bc7fc7 --- /dev/null +++ b/src/08_example/03_combinators.md @@ -0,0 +1,34 @@ +# Multiple Concurrent Actions Per Request +So far, the only way we've been able to run tasks concurrently has been to run them on separate threads. +Asynchronous code wouldn't be very useful if we could only achieve concurrency through multithreading. +Let's see how we can run multiple asynchronous tasks on a single thread. + +Imagine we wanted to perform some more tasks with each incoming TCP connection. +For example, we might want to write information about the request to a database, +or put some data from the request onto a queue for processing. +Both of these actions can block, meaning that running them asynchronously will likely improve performance. + +Let's simulate a slow request to a database or a blocking request to a queue: +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:slow_functions}} +``` + +A common mistake is to use `std::thread::sleep`, a blocking function, to simulate slow requests in examples like this one. 
+It's important to remember that even if a piece of code is run within an `async fn` and `await`ed, it may still block. +To make this example work, we'll need to replace `std::thread::sleep` with the non-blocking variant `async_std::task::sleep`. + +Now, let's run `write_to_database` and `add_to_queue` within `handle_connection`: +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:serial_execution}} +``` + +If you run this code and visit `127.0.0.1:7878` in a browser, you'll see +"Write to database + add to queue took 5 seconds" printed to the console. +The request took 5 seconds because the program can only add to the queue once writing to the database has completed. + +To run these two asynchronous functions concurrently, we can use the `join` combinator from the `futures` crate: +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:parallel_execution}} +``` +Handling a request will now take only 3 seconds. We've successfully run two tasks concurrently on one thread! +Please see the [section on combinators](../06_multiple_futures/01_chapter.md) for more information and examples. diff --git a/src/08_example/04_tests.md b/src/08_example/04_tests.md new file mode 100644 index 00000000..755782e6 --- /dev/null +++ b/src/08_example/04_tests.md @@ -0,0 +1,50 @@ +# Testing Async Code +Let's move on to testing our `handle_connection` function. +First, we need a `TcpStream` to work with, but we don't want to make a real TCP connection in test code. +We could work around this in a few ways. +One strategy could be to refactor the code to be more modular, +and only test that the correct responses are returned for the respective inputs. + +Another strategy is to connect to `localhost` on port 0. +Port 0 isn't a valid UNIX port, but it'll work for testing. +The operating system will return a connection on any open TCP port. + +Instead of those strategies, we'll change the signature of `handle_connection` to make it easier to test. +`handle_connection` doesn't actually require an `async_std::net::TcpStream`; +it requires any struct that implements `async_std::io::Read`, `async_std::io::Write`, and `marker::Unpin`. +Changing the type signature to reflect this allows us to pass a mock for testing instead of a TcpStream. +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:handle_connection}} +``` + +Next, let's build a mock `TcpStream` that implements these traits. +First, let's implement the `Read` trait, with one method, `poll_read`. +Our mock `TcpStream` will contain some data that is copied into the read buffer, +and we'll return `Poll::Ready` to signify that the read is complete. +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:mock_read}} +``` + +Our implementation of `Write` is very similar, +although we'll need to write three methods: `poll_write`, `poll_flush`, and `poll_close`. +`poll_write` will copy any input data into the mock `TcpStream`, and return `Poll::Ready` when complete. +No work needs to be done to flush or close the mock `TcpStream`, so `poll_flush` and `poll_close` +can just return `Poll::Ready`. +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:mock_write}} +``` + +Lastly, our mock will need to implement `Unpin`, signifying that its location in memory can safely be moved. +For more information on pinning and the `Unpin` trait, see the [section on pinning](../04_pinning/01_chapter.md). 
+```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:unpin}} +``` + +Now we're ready to test the `handle_connection` function. +After setting up the `MockTcpStream` containing some initial data, +we can run `handle_connection` using `async_std::task::block_on`, exactly as we did in the main method. +To ensure that `handle_connection` works as intended, we'll check that the correct data +was written to the `MockTcpStream` based on its initial contents. +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:test}} +``` \ No newline at end of file diff --git a/src/SUMMARY.md b/src/SUMMARY.md index 92f22f0f..bbdf925a 100644 --- a/src/SUMMARY.md +++ b/src/SUMMARY.md @@ -4,7 +4,6 @@ - [Why Async?](01_getting_started/02_why_async.md) - [The State of Asynchronous Rust](01_getting_started/03_state_of_async_rust.md) - [`async`/`.await` Primer](01_getting_started/04_async_await_primer.md) - - [Applied: HTTP Server](01_getting_started/05_http_server_example.md) - [Under the Hood: Executing `Future`s and Tasks](02_execution/01_chapter.md) - [The `Future` Trait](02_execution/02_future.md) - [Task Wakeups with `Waker`](02_execution/03_wakeups.md) @@ -26,6 +25,11 @@ - [`Send` Approximation](07_workarounds/04_send_approximation.md) - [Recursion](07_workarounds/05_recursion.md) - [`async` in Traits](07_workarounds/06_async_in_traits.md) +- [Applied: HTTP Server](08_example/00_intro.md) + - [Running Asynchronous Code](08_example/01_running_async_code.md) + - [Handling Connections Concurrently](08_example/02_handling_connections_concurrently.md) + - [Multiple Actions Per Request](08_example/03_combinators.md) + - [Testing the Server](08_example/04_tests.md) - [TODO: I/O](404.md) - [TODO: `AsyncRead` and `AsyncWrite`](404.md) - [TODO: Asynchronous Design Patterns: Solutions and Suggestions](404.md) From aaf281bca8d389f95b768b48ebbb4f4e858252de Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Mon, 31 Aug 2020 16:45:33 -0400 Subject: [PATCH 02/12] More beginner-friendly TCP server example --- examples/01_05_http_server/Cargo.toml | 18 --- examples/01_05_http_server/src/lib.rs | 91 ----------- examples/08_01_sync_tcp_server/404.html | 11 ++ examples/08_01_sync_tcp_server/Cargo.toml | 9 ++ examples/08_01_sync_tcp_server/hello.html | 11 ++ examples/08_01_sync_tcp_server/src/main.rs | 39 +++++ examples/08_02_async_tcp_server/Cargo.toml | 10 ++ examples/08_02_async_tcp_server/src/main.rs | 23 +++ .../08_03_concurrent_tcp_server/Cargo.toml | 10 ++ .../08_03_concurrent_tcp_server/src/main.rs | 20 +++ .../08_04_nonblocking_tcp_server/Cargo.toml | 10 ++ .../08_04_nonblocking_tcp_server/src/main.rs | 29 ++++ examples/08_05_final_tcp_server/Cargo.toml | 11 ++ examples/08_05_final_tcp_server/hello.html | 11 ++ examples/08_05_final_tcp_server/src/main.rs | 153 ++++++++++++++++++ examples/Cargo.toml | 6 +- .../04_async_await_primer.md | 3 - .../05_http_server_example.md | 78 --------- src/03_async_await/01_chapter.md | 4 +- src/08_example/00_intro.md | 23 +++ src/08_example/01_running_async_code.md | 51 ++++++ .../02_handling_connections_concurrently.md | 34 ++++ src/08_example/03_combinators.md | 34 ++++ src/08_example/04_tests.md | 53 ++++++ src/SUMMARY.md | 6 +- 25 files changed, 554 insertions(+), 194 deletions(-) delete mode 100644 examples/01_05_http_server/Cargo.toml delete mode 100644 examples/01_05_http_server/src/lib.rs create mode 100644 examples/08_01_sync_tcp_server/404.html create mode 100644 examples/08_01_sync_tcp_server/Cargo.toml create mode 100644 
examples/08_01_sync_tcp_server/hello.html create mode 100644 examples/08_01_sync_tcp_server/src/main.rs create mode 100644 examples/08_02_async_tcp_server/Cargo.toml create mode 100644 examples/08_02_async_tcp_server/src/main.rs create mode 100644 examples/08_03_concurrent_tcp_server/Cargo.toml create mode 100644 examples/08_03_concurrent_tcp_server/src/main.rs create mode 100644 examples/08_04_nonblocking_tcp_server/Cargo.toml create mode 100644 examples/08_04_nonblocking_tcp_server/src/main.rs create mode 100644 examples/08_05_final_tcp_server/Cargo.toml create mode 100644 examples/08_05_final_tcp_server/hello.html create mode 100644 examples/08_05_final_tcp_server/src/main.rs delete mode 100644 src/01_getting_started/05_http_server_example.md create mode 100644 src/08_example/00_intro.md create mode 100644 src/08_example/01_running_async_code.md create mode 100644 src/08_example/02_handling_connections_concurrently.md create mode 100644 src/08_example/03_combinators.md create mode 100644 src/08_example/04_tests.md diff --git a/examples/01_05_http_server/Cargo.toml b/examples/01_05_http_server/Cargo.toml deleted file mode 100644 index 7c1d7f0e..00000000 --- a/examples/01_05_http_server/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "example_01_05_http_server" -version = "0.2.0" -authors = ["Taylor Cramer "] -edition = "2018" - -[lib] - -[dependencies] -# Hyper is an asynchronous HTTP library. We'll use it to power our HTTP -# server and to make HTTP requests. -hyper = "0.13" -# To setup some sort of runtime needed by Hyper, we will use the Tokio runtime. -tokio = { version = "0.2", features = ["full"] } - -# (only for testing) -anyhow = "1.0.31" -reqwest = { version = "0.10.4", features = ["blocking"] } diff --git a/examples/01_05_http_server/src/lib.rs b/examples/01_05_http_server/src/lib.rs deleted file mode 100644 index c3a9a949..00000000 --- a/examples/01_05_http_server/src/lib.rs +++ /dev/null @@ -1,91 +0,0 @@ -#![cfg(test)] - -// ANCHOR: imports -use { - hyper::{ - // Following functions are used by Hyper to handle a `Request` - // and returning a `Response` in an asynchronous manner by using a Future - service::{make_service_fn, service_fn}, - // Miscellaneous types from Hyper for working with HTTP. - Body, - Client, - Request, - Response, - Server, - Uri, - }, - std::net::SocketAddr, -}; -// ANCHOR_END: imports - -// ANCHOR: boilerplate -async fn serve_req(_req: Request) -> Result, hyper::Error> { - // Always return successfully with a response containing a body with - // a friendly greeting ;) - Ok(Response::new(Body::from("hello, world!"))) -} - -async fn run_server(addr: SocketAddr) { - println!("Listening on http://{}", addr); - - // Create a server bound on the provided address - let serve_future = Server::bind(&addr) - // Serve requests using our `async serve_req` function. - // `serve` takes a type which implements the `MakeService` trait. - // `make_service_fn` converts a closure into a type which - // implements the `MakeService` trait. That closure must return a - // type that implements the `Service` trait, and `service_fn` - // converts a request-response function into a type that implements - // the `Service` trait. - .serve(make_service_fn(|_| async { - Ok::<_, hyper::Error>(service_fn(serve_req)) - })); - - // Wait for the server to complete serving or exit with an error. - // If an error occurred, print it to stderr. 
- if let Err(e) = serve_future.await { - eprintln!("server error: {}", e); - } -} - -#[tokio::main] -async fn main() { - // Set the address to run our socket on. - let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); - - // Call our `run_server` function, which returns a future. - // As with every `async fn`, for `run_server` to do anything, - // the returned future needs to be run using `await`; - run_server(addr).await; -} -// ANCHOR_END: boilerplate - -#[test] -fn run_main_and_query_http() -> Result<(), anyhow::Error> { - std::thread::spawn(main); - // Unfortunately, there's no good way for us to detect when the server - // has come up, so we sleep for an amount that should hopefully be - // sufficient :( - std::thread::sleep(std::time::Duration::from_secs(5)); - let response = reqwest::blocking::get("http://localhost:3000")?.text()?; - assert_eq!(response, "hello, world!"); - Ok(()) -} - -mod proxy { - use super::*; - #[allow(unused)] - async fn serve_req(_req: Request) -> Result, hyper::Error> { - // ANCHOR: parse_url - let url_str = "http://www.rust-lang.org/en-US/"; - let url = url_str.parse::().expect("failed to parse URL"); - // ANCHOR_END: parse_url - - // ANCHOR: get_request - let res = Client::new().get(url).await?; - // Return the result of the request directly to the user - println!("request finished-- returning response"); - Ok(res) - // ANCHOR_END: get_request - } -} diff --git a/examples/08_01_sync_tcp_server/404.html b/examples/08_01_sync_tcp_server/404.html new file mode 100644 index 00000000..88d8e915 --- /dev/null +++ b/examples/08_01_sync_tcp_server/404.html @@ -0,0 +1,11 @@ + + + + + Hello! + + +

+    <h1>Oops!</h1>
+    <p>Sorry, I don't know what you're asking for.</p>

+ + diff --git a/examples/08_01_sync_tcp_server/Cargo.toml b/examples/08_01_sync_tcp_server/Cargo.toml new file mode 100644 index 00000000..3048a1e7 --- /dev/null +++ b/examples/08_01_sync_tcp_server/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "sync_tcp_server" +version = "0.1.0" +authors = ["Your Name + + + + Hello! + + +

+    <h1>Hello!</h1>
+    <p>Hi from Rust</p>

+ + diff --git a/examples/08_01_sync_tcp_server/src/main.rs b/examples/08_01_sync_tcp_server/src/main.rs new file mode 100644 index 00000000..8af6b4a1 --- /dev/null +++ b/examples/08_01_sync_tcp_server/src/main.rs @@ -0,0 +1,39 @@ +use std::fs; +use std::io::prelude::*; +use std::net::TcpListener; +use std::net::TcpStream; + +fn main() { + // Listen for incoming TCP connections on localhost port 7878 + let listener = TcpListener::bind("127.0.0.1:7878").unwrap(); + + // Block forever, handling each request that arrives at this IP address + for stream in listener.incoming() { + let stream = stream.unwrap(); + + handle_connection(stream); + } +} + +fn handle_connection(mut stream: TcpStream) { + // Read the first 1024 bytes of data from the stream + let mut buffer = [0; 1024]; + stream.read(&mut buffer).unwrap(); + + let get = b"GET / HTTP/1.1\r\n"; + + // Respond with greetings or a 404, + // depending on the data in the request + let (status_line, filename) = if buffer.starts_with(get) { + ("HTTP/1.1 200 OK\r\n\r\n", "hello.html") + } else { + ("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html") + }; + let contents = fs::read_to_string(filename).unwrap(); + + // Write response back to the stream, + // and flush the stream to ensure the response is sent back to the client + let response = format!("{}{}", status_line, contents); + stream.write(response.as_bytes()).unwrap(); + stream.flush().unwrap(); +} \ No newline at end of file diff --git a/examples/08_02_async_tcp_server/Cargo.toml b/examples/08_02_async_tcp_server/Cargo.toml new file mode 100644 index 00000000..d471d262 --- /dev/null +++ b/examples/08_02_async_tcp_server/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "async_tcp_server" +version = "0.1.0" +authors = ["Your Name +} +// ANCHOR_END: handle_connection_async diff --git a/examples/08_03_concurrent_tcp_server/Cargo.toml b/examples/08_03_concurrent_tcp_server/Cargo.toml new file mode 100644 index 00000000..07b882f5 --- /dev/null +++ b/examples/08_03_concurrent_tcp_server/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "concurrent_tcp_server" +version = "0.1.0" +authors = ["Your Name +} diff --git a/examples/08_04_nonblocking_tcp_server/Cargo.toml b/examples/08_04_nonblocking_tcp_server/Cargo.toml new file mode 100644 index 00000000..276c4d92 --- /dev/null +++ b/examples/08_04_nonblocking_tcp_server/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "nonblocking_tcp_server" +version = "0.1.0" +authors = ["Your Name + stream.write(response.as_bytes()).await.unwrap(); + stream.flush().await.unwrap(); +} +// ANCHOR_END: handle_connection diff --git a/examples/08_05_final_tcp_server/Cargo.toml b/examples/08_05_final_tcp_server/Cargo.toml new file mode 100644 index 00000000..7b1e8e00 --- /dev/null +++ b/examples/08_05_final_tcp_server/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "final_tcp_server" +version = "0.1.0" +authors = ["Your Name + + + + Hello! + + +

+    <h1>Hello!</h1>
+    <p>Hi from Rust</p>

+ + diff --git a/examples/08_05_final_tcp_server/src/main.rs b/examples/08_05_final_tcp_server/src/main.rs new file mode 100644 index 00000000..97c3ed84 --- /dev/null +++ b/examples/08_05_final_tcp_server/src/main.rs @@ -0,0 +1,153 @@ +use std::fs; +use std::time::{Duration, Instant}; + +use futures::join; + +use async_std::net::TcpListener; +use async_std::prelude::*; +use async_std::task::{block_on, spawn}; + +fn main() { + block_on(async { + let listener = TcpListener::bind("127.0.0.1:7878").await.unwrap(); + + loop { + let (stream, _) = listener.accept().await.unwrap(); + spawn(handle_connection(stream)); + } + }) +} + +use async_std::io::{Read, Write}; +use std::marker::Unpin; + +async fn handle_connection(mut stream: impl Read + Write + Unpin) { + let mut buffer = [0; 1024]; + stream.read(&mut buffer).await.unwrap(); + let get = b"GET / HTTP/1.1\r\n"; + let (status_line, filename) = if buffer.starts_with(get) { + ("HTTP/1.1 200 OK\r\n\r\n", "hello.html") + } else { + ("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html") + }; + let contents = fs::read_to_string(filename).unwrap(); + let response = format!("{}{}", status_line, contents); + stream.write(response.as_bytes()).await.unwrap(); + stream.flush().await.unwrap(); +} + +// ANCHOR: slow_functions +use async_std::task::sleep; + +async fn write_to_database() { + // Simulate a slow request + sleep(Duration::from_secs(2)).await; +} + +async fn add_to_queue() { + // Simulate a slow request + sleep(Duration::from_secs(1)).await; +} +// ANCHOR_END: slow_functions + +async fn foo() { + // ANCHOR: serial_execution + let now = Instant::now(); + write_to_database().await; + add_to_queue().await; + println!( + "Write to database + add to queue took {} seconds", + now.elapsed().as_secs() + ); + // ANCHOR_END: serial_execution +} + +async fn bar() { + // ANCHOR: parallel_execution + let now = Instant::now(); + join!(write_to_database(), add_to_queue()); + println!( + "Write to database + add to queue took {} seconds", + now.elapsed().as_secs() + ); + // ANCHOR_END: parallel_execution +} + +#[cfg(test)] + +mod tests { + // ANCHOR: mock_read + use super::*; + use futures::io::Error; + use futures::task::{Context, Poll}; + + use std::cmp::min; + use std::pin::Pin; + + struct MockTcpStream { + read_data: Vec, + write_data: Vec, + } + + impl Read for MockTcpStream { + fn poll_read( + self: Pin<&mut Self>, + _: &mut Context, + buf: &mut [u8], + ) -> Poll> { + let size: usize = min(self.read_data.len(), buf.len()); + buf.copy_from_slice(&self.read_data[..size]); + Poll::Ready(Ok(size)) + } + } + // ANCHOR_END: mock_read + + // ANCHOR: mock_write + impl Write for MockTcpStream { + fn poll_write( + mut self: Pin<&mut Self>, + _: &mut Context, + buf: &[u8], + ) -> Poll> { + self.write_data = Vec::from(buf); + return Poll::Ready(Ok(buf.len())); + } + fn poll_flush(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + } + // ANCHOR_END: mock_write + + // ANCHOR: unpin + use std::marker::Unpin; + impl Unpin for MockTcpStream {} + // ANCHOR_END: unpin + + // ANCHOR: test + use std::fs; + + #[test] + fn test_handle_connection() { + let input_bytes = b"GET / HTTP/1.1\r\n"; + let mut contents = vec![0u8; 1024]; + contents[..input_bytes.len()].clone_from_slice(input_bytes); + let mut stream = MockTcpStream { + read_data: contents, + write_data: Vec::new(), + }; + + block_on(async { + handle_connection(&mut stream).await; + let mut buf = [0u8; 1024]; + 
stream.read(&mut buf).await.unwrap(); + }); + + let expected_contents = fs::read_to_string("hello.html").unwrap(); + let expected_response = format!("HTTP/1.1 200 OK\r\n\r\n{}", expected_contents); + assert!(stream.write_data.starts_with(expected_response.as_bytes())); + } + // ANCHOR_END: test +} diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b89b3115..582345b7 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -2,7 +2,6 @@ members = [ "01_02_why_async", "01_04_async_await_primer", - "01_05_http_server", "02_02_future_trait", "02_03_timer", "02_04_executor", @@ -12,4 +11,9 @@ members = [ "06_02_join", "06_03_select", "07_05_recursion", + "08_01_sync_tcp_server", + "08_02_async_tcp_server", + "08_03_concurrent_tcp_server", + "08_04_nonblocking_tcp_server", + "08_05_final_tcp_server", ] diff --git a/src/01_getting_started/04_async_await_primer.md b/src/01_getting_started/04_async_await_primer.md index 5a887746..c2eaeff0 100644 --- a/src/01_getting_started/04_async_await_primer.md +++ b/src/01_getting_started/04_async_await_primer.md @@ -66,6 +66,3 @@ This would make it impossible to dance at the same time. By `.await`-ing the `learn_song` future, we allow other tasks to take over the current thread if `learn_song` is blocked. This makes it possible to run multiple futures to completion concurrently on the same thread. - -Now that you've learned the basics of `async`/`await`, let's try out an -example. diff --git a/src/01_getting_started/05_http_server_example.md b/src/01_getting_started/05_http_server_example.md deleted file mode 100644 index 521012f2..00000000 --- a/src/01_getting_started/05_http_server_example.md +++ /dev/null @@ -1,78 +0,0 @@ -# Applied: Simple HTTP Server - -Let's use `async`/`.await` to build an echo server! - -To start, run `rustup update stable` to make sure you've got stable Rust 1.39 or newer. Once you've done that, run -`cargo new async-await-echo` to create a new project, and open up -the resulting `async-await-echo` folder. - -Let's add some dependencies to the `Cargo.toml` file: - -```toml -{{#include ../../examples/01_05_http_server/Cargo.toml:9:18}} -``` - -Now that we've got our dependencies out of the way, let's start writing some -code. We have some imports to add: - -```rust,ignore -{{#include ../../examples/01_05_http_server/src/lib.rs:imports}} -``` - -Once the imports are out of the way, we can start putting together the -boilerplate to allow us to serve requests: - -```rust,ignore -{{#include ../../examples/01_05_http_server/src/lib.rs:boilerplate}} -``` - -If you `cargo run` now, you should see the message "Listening on -http://127.0.0.1:3000" printed on your terminal. If you open that URL in your -browser of choice, you'll see "hello, world!" appear in your browser. -Congratulations! You just wrote your first asynchronous webserver in Rust. - -You can also inspect the request itself, which contains information such as -the request URI, HTTP version, headers, and other metadata. For example, we -can print out the URI of the request like this: - -```rust,ignore -println!("Got request at {:?}", _req.uri()); -``` - -You may have noticed that we're not yet doing -anything asynchronous when handling the request-- we just respond immediately, -so we're not taking advantage of the flexibility that `async fn` gives us. -Rather than just returning a static message, let's try proxying the user's -request to another website using Hyper's HTTP client. 
- -We start by parsing out the URL we want to request: - -```rust,ignore -{{#include ../../examples/01_05_http_server/src/lib.rs:parse_url}} -``` - -Then we can create a new `hyper::Client` and use it to make a `GET` request, -returning the response to the user: - -```rust,ignore -{{#include ../../examples/01_05_http_server/src/lib.rs:get_request}} -``` - -`Client::get` returns a `hyper::client::ResponseFuture`, which implements -`Future>>` -(or `Future, Error = Error>` in futures 0.1 terms). -When we `.await` that future, an HTTP request is sent out, the current task -is suspended, and the task is queued to be continued once a response has -become available. - -Now, if you `cargo run` and open `http://127.0.0.1:3000/foo` in your browser, -you'll see the Rust homepage, and the following terminal output: - -``` -Listening on http://127.0.0.1:3000 -Got request at /foo -making request to http://www.rust-lang.org/en-US/ -request finished-- returning response -``` - -Congratulations! You just proxied an HTTP request. diff --git a/src/03_async_await/01_chapter.md b/src/03_async_await/01_chapter.md index 3d4d72f5..c4f6cfe7 100644 --- a/src/03_async_await/01_chapter.md +++ b/src/03_async_await/01_chapter.md @@ -1,7 +1,7 @@ # `async`/`.await` -In [the first chapter], we took a brief look at `async`/`.await` and used -it to build a simple server. This chapter will discuss `async`/`.await` in +In [the first chapter], we took a brief look at `async`/`.await`. +This chapter will discuss `async`/`.await` in greater detail, explaining how it works and how `async` code differs from traditional Rust programs. diff --git a/src/08_example/00_intro.md b/src/08_example/00_intro.md new file mode 100644 index 00000000..142b9a45 --- /dev/null +++ b/src/08_example/00_intro.md @@ -0,0 +1,23 @@ +# Applied: Building a Concurrent Web Server with Async Rust +In this chapter, we'll use asynchronous Rust to modify the Rust book's +[single-threaded web server](https://doc.rust-lang.org/book/ch20-01-single-threaded.html) +to serve requests concurrently. +Here's what the code looked like at the end of the lesson. + +`src/main.rs`: +```rust +{{#include ../../examples/08_01_sync_tcp_server/src/main.rs}} +``` + +`hello.html`: +```html +{{#include ../../examples/08_01_sync_tcp_server/hello.html}} +``` + +`404.html`: +```html +{{#include ../../examples/08_01_sync_tcp_server/404.html}} +``` + +If you run the server with `cargo run` and visit `127.0.0.1:7878` in your browser, +you'll be greeted with a friendly message from Ferris! \ No newline at end of file diff --git a/src/08_example/01_running_async_code.md b/src/08_example/01_running_async_code.md new file mode 100644 index 00000000..b67944f4 --- /dev/null +++ b/src/08_example/01_running_async_code.md @@ -0,0 +1,51 @@ +# Running Asynchronous Code +As [the book explains](https://doc.rust-lang.org/book/ch20-02-multithreaded.html#turning-our-single-threaded-server-into-a-multithreaded-server), +we don't want our web server to wait for each request to finish before handling the next, +as some requests could be very slow. +Instead of improving throughput by adding threads, +we'll use asynchronous code to process requests concurrently. + +Let's modify `handle_connection` to return a future by declaring it an `async fn`: +```rust,ignore +{{#include ../../examples/08_02_async_tcp_server/src/main.rs:handle_connection_async}} +``` + +Adding `async` to the function declaration changes its return type +from the unit type `()` to a type that implements `Future`. 
+ +If we try to compile this, the compiler warns us that it will not work: +```console +$ cargo check + Checking async-rust v0.1.0 (file:///projects/async-rust) +warning: unused implementer of `std::future::Future` that must be used + --> src/main.rs:12:9 + | +12 | handle_connection(stream); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `#[warn(unused_must_use)]` on by default + = note: futures do nothing unless you `.await` or poll them +``` + +Because we haven't `await`ed or `poll`ed the result of `handle_connection`, +it'll never run. If you run the server and visit `127.0.0.1:7878` in a browser, +you'll see that the connection is refused; our server is not handling requests. + +We can't `await` or `poll` futures within synchronous code by itself. +We'll need an executor to handle scheduling and running futures to completion. +Please consult the section [Choosing an Executor](../404.md) for more information on executors. +Here, we'll use the `block_on` executor from the `async_std` crate. + +It might be tempting to write something like this: +```rust +{{#include ../../examples/08_02_async_tcp_server/src/main.rs:main_func}} +``` + +However, just because this program uses an asynchronous connection handler +doesn't mean that it handles connections concurrently. +To illustrate this, try out the +[simulation of a slow request](https://doc.rust-lang.org/book/ch20-02-multithreaded.html#simulating-a-slow-request-in-the-current-server-implementation) +from the Book. You'll see that one slow request will block any other incoming requests! +This is because there are no other concurrent tasks that can make progress +while we are `await`ing the result of `handle_connection`. +We'll see how to avoid this in the next section. \ No newline at end of file diff --git a/src/08_example/02_handling_connections_concurrently.md b/src/08_example/02_handling_connections_concurrently.md new file mode 100644 index 00000000..9e017223 --- /dev/null +++ b/src/08_example/02_handling_connections_concurrently.md @@ -0,0 +1,34 @@ +# Handling Connections Concurrently + +The problem with our code so far is that `listener.incoming()` is a blocking iterator; +we can't read a new request from this stream until we're done with the previous one. +One strategy to work around this is to spawn a new Task to handle each connection in the background: +```rust +{{#include ../../examples/08_03_concurrent_tcp_server/src/main.rs:main_func}} +``` + +This works because under the hood, the `async_std` executor runs `handle_connection` on a separate thread. +However, this doesn't completely solve our problem: `listener.incoming()` still blocks the executor. +Even if connections are handled in separate threads, futures running on the main thread +are blocked while `listener` waits on incoming connections. + +In order to fix this, we can replace our blocking `std::net::TcpListener` with the non-blocking `async_std::net::TcpListener`. + +This change prevents `listener.incoming()` from blocking the executor +by allowing us to `await` the next TCP connection on this port. +Now, the executor can yield to other futures running on the main thread +while there are no incoming TCP connections to be processed. +(Note that this change still does *not* allow `listener.incoming()` to emit items concurrently. +We still need to process a stream or spawn a task to handle it before moving on to the next one.) + +Let's update our example to make use of the asynchronous `TcpListener`. 
+First, we'll need to update our code to `await` the next incoming connection, +rather than iterating over `listener.incoming()`: +```rust +{{#include ../../examples/08_04_nonblocking_tcp_server/src/main.rs:main_func}} +``` + +Lastly, we'll have to update our connection handler to accept an `async_std::net::TcpStream`: +```rust,ignore +{{#include ../../examples/08_04_nonblocking_tcp_server/src/main.rs:handle_connection}} +``` diff --git a/src/08_example/03_combinators.md b/src/08_example/03_combinators.md new file mode 100644 index 00000000..81bc7fc7 --- /dev/null +++ b/src/08_example/03_combinators.md @@ -0,0 +1,34 @@ +# Multiple Concurrent Actions Per Request +So far, the only way we've been able to run tasks concurrently has been to run them on separate threads. +Asynchronous code wouldn't be very useful if we could only achieve concurrency through multithreading. +Let's see how we can run multiple asynchronous tasks on a single thread. + +Imagine we wanted to perform some more tasks with each incoming TCP connection. +For example, we might want to write information about the request to a database, +or put some data from the request onto a queue for processing. +Both of these actions can block, meaning that running them asynchronously will likely improve performance. + +Let's simulate a slow request to a database or a blocking request to a queue: +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:slow_functions}} +``` + +A common mistake is to use `std::thread::sleep`, a blocking function, to simulate slow requests in examples like this one. +It's important to remember that even if a piece of code is run within an `async fn` and `await`ed, it may still block. +To make this example work, we'll need to replace `std::thread::sleep` with the non-blocking variant `async_std::task::sleep`. + +Now, let's run `write_to_database` and `add_to_queue` within `handle_connection`: +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:serial_execution}} +``` + +If you run this code and visit `127.0.0.1:7878` in a browser, you'll see +"Write to database + add to queue took 5 seconds" printed to the console. +The request took 5 seconds because the program can only add to the queue once writing to the database has completed. + +To run these two asynchronous functions concurrently, we can use the `join` combinator from the `futures` crate: +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:parallel_execution}} +``` +Handling a request will now take only 3 seconds. We've successfully run two tasks concurrently on one thread! +Please see the [section on combinators](../06_multiple_futures/01_chapter.md) for more information and examples. diff --git a/src/08_example/04_tests.md b/src/08_example/04_tests.md new file mode 100644 index 00000000..6e2a5240 --- /dev/null +++ b/src/08_example/04_tests.md @@ -0,0 +1,53 @@ +# Testing Async Code +Let's move on to testing our `handle_connection` function. +First, we need a `TcpStream` to work with, but we don't want to make a real TCP connection in test code. +We could work around this in a few ways. +One strategy could be to refactor the code to be more modular, +and only test that the correct responses are returned for the respective inputs. + +Another strategy is to connect to `localhost` on port 0. +Port 0 isn't a valid UNIX port, but it'll work for testing. +The operating system will return a connection on any open TCP port. 
+ +Instead of those strategies, we'll change the signature of `handle_connection` to make it easier to test. +`handle_connection` doesn't actually require an `async_std::net::TcpStream`; +it requires any struct that implements `async_std::io::Read`, `async_std::io::Write`, and `marker::Unpin`. +Changing the type signature to reflect this allows us to pass a mock for testing instead of a TcpStream. +```rust,ignore +use std::marker::Unpin; +use async_std::io::{Read, Write}; + +async fn handle_connection(mut stream: impl Read + Write + Unpin) { +``` + +Next, let's build a mock `TcpStream` that implements these traits. +First, let's implement the `Read` trait, with one method, `poll_read`. +Our mock `TcpStream` will contain some data that is copied into the read buffer, +and we'll return `Poll::Ready` to signify that the read is complete. +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:mock_read}} +``` + +Our implementation of `Write` is very similar, +although we'll need to write three methods: `poll_write`, `poll_flush`, and `poll_close`. +`poll_write` will copy any input data into the mock `TcpStream`, and return `Poll::Ready` when complete. +No work needs to be done to flush or close the mock `TcpStream`, so `poll_flush` and `poll_close` +can just return `Poll::Ready`. +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:mock_write}} +``` + +Lastly, our mock will need to implement `Unpin`, signifying that its location in memory can safely be moved. +For more information on pinning and the `Unpin` trait, see the [section on pinning](../04_pinning/01_chapter.md). +```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:unpin}} +``` + +Now we're ready to test the `handle_connection` function. +After setting up the `MockTcpStream` containing some initial data, +we can run `handle_connection` using `async_std::task::block_on`, exactly as we did in the main method. +To ensure that `handle_connection` works as intended, we'll check that the correct data +was written to the `MockTcpStream` based on its initial contents. 
+```rust,ignore +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:test}} +``` \ No newline at end of file diff --git a/src/SUMMARY.md b/src/SUMMARY.md index 92f22f0f..bbdf925a 100644 --- a/src/SUMMARY.md +++ b/src/SUMMARY.md @@ -4,7 +4,6 @@ - [Why Async?](01_getting_started/02_why_async.md) - [The State of Asynchronous Rust](01_getting_started/03_state_of_async_rust.md) - [`async`/`.await` Primer](01_getting_started/04_async_await_primer.md) - - [Applied: HTTP Server](01_getting_started/05_http_server_example.md) - [Under the Hood: Executing `Future`s and Tasks](02_execution/01_chapter.md) - [The `Future` Trait](02_execution/02_future.md) - [Task Wakeups with `Waker`](02_execution/03_wakeups.md) @@ -26,6 +25,11 @@ - [`Send` Approximation](07_workarounds/04_send_approximation.md) - [Recursion](07_workarounds/05_recursion.md) - [`async` in Traits](07_workarounds/06_async_in_traits.md) +- [Applied: HTTP Server](08_example/00_intro.md) + - [Running Asynchronous Code](08_example/01_running_async_code.md) + - [Handling Connections Concurrently](08_example/02_handling_connections_concurrently.md) + - [Multiple Actions Per Request](08_example/03_combinators.md) + - [Testing the Server](08_example/04_tests.md) - [TODO: I/O](404.md) - [TODO: `AsyncRead` and `AsyncWrite`](404.md) - [TODO: Asynchronous Design Patterns: Solutions and Suggestions](404.md) From d14d0935f7799c2682f3f1642954e8ae89a636d4 Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Fri, 4 Sep 2020 17:27:03 -0400 Subject: [PATCH 03/12] Add new words to dictionary --- ci/dictionary.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ci/dictionary.txt b/ci/dictionary.txt index 98490d3f..89bfdc77 100644 --- a/ci/dictionary.txt +++ b/ci/dictionary.txt @@ -24,6 +24,7 @@ FutOne FutTwo FuturesUnordered GenFuture +html http Hyper's impl @@ -33,8 +34,11 @@ IoBlocker IOCP IoObject kqueue +localhost metadata +MockTcpStream multithreaded +multithreading Mutex MyError MyFut @@ -57,6 +61,7 @@ requeue ResponseFuture reusability runtime +runtimes rustc rustup SimpleFuture @@ -69,6 +74,8 @@ struct subfuture subfutures subpar +TcpListener +TcpStream threadpool TimerFuture TODO From eecf6eea50c3a30162f004a793cfc39948a32b02 Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Fri, 4 Sep 2020 17:31:04 -0400 Subject: [PATCH 04/12] Responding to comments from Didrik --- examples/01_02_why_async/src/lib.rs | 5 +- examples/01_04_async_await_primer/src/lib.rs | 110 +++--- examples/02_02_future_trait/src/lib.rs | 44 +-- examples/02_03_timer/src/lib.rs | 20 +- examples/02_04_executor/src/lib.rs | 15 +- examples/03_01_async_await/src/lib.rs | 140 ++++---- examples/05_01_streams/src/lib.rs | 102 +++--- .../src/lib.rs | 25 +- examples/06_02_join/src/lib.rs | 119 ++++--- examples/06_03_select/src/lib.rs | 318 +++++++++--------- examples/07_05_recursion/src/lib.rs | 3 +- examples/08_01_sync_tcp_server/src/main.rs | 2 +- examples/08_02_async_tcp_server/Cargo.toml | 5 +- examples/08_02_async_tcp_server/src/main.rs | 17 +- .../08_03_concurrent_tcp_server/Cargo.toml | 5 +- .../08_03_concurrent_tcp_server/src/main.rs | 15 +- .../08_04_nonblocking_tcp_server/Cargo.toml | 5 +- .../08_04_nonblocking_tcp_server/src/main.rs | 17 +- examples/08_05_final_tcp_server/Cargo.toml | 7 +- examples/08_05_final_tcp_server/src/main.rs | 29 +- src/08_example/00_intro.md | 4 +- src/08_example/01_running_async_code.md | 25 +- src/08_example/04_tests.md | 4 +- src/SUMMARY.md | 2 +- 24 files changed, 540 insertions(+), 498 deletions(-) diff --git 
a/examples/01_02_why_async/src/lib.rs b/examples/01_02_why_async/src/lib.rs index 64758e4f..a43f7a8f 100644 --- a/examples/01_02_why_async/src/lib.rs +++ b/examples/01_02_why_async/src/lib.rs @@ -1,10 +1,7 @@ #![cfg(test)] use { - futures::{ - executor::block_on, - join, - }, + futures::{executor::block_on, join}, std::thread, }; diff --git a/examples/01_04_async_await_primer/src/lib.rs b/examples/01_04_async_await_primer/src/lib.rs index 1a2c14eb..4425e22e 100644 --- a/examples/01_04_async_await_primer/src/lib.rs +++ b/examples/01_04_async_await_primer/src/lib.rs @@ -3,73 +3,81 @@ use futures::executor::block_on; mod first { -// ANCHOR: hello_world -// `block_on` blocks the current thread until the provided future has run to -// completion. Other executors provide more complex behavior, like scheduling -// multiple futures onto the same thread. -use futures::executor::block_on; + // ANCHOR: hello_world + // `block_on` blocks the current thread until the provided future has run to + // completion. Other executors provide more complex behavior, like scheduling + // multiple futures onto the same thread. + use futures::executor::block_on; -async fn hello_world() { - println!("hello, world!"); -} + async fn hello_world() { + println!("hello, world!"); + } -fn main() { - let future = hello_world(); // Nothing is printed - block_on(future); // `future` is run and "hello, world!" is printed -} -// ANCHOR_END: hello_world + fn main() { + let future = hello_world(); // Nothing is printed + block_on(future); // `future` is run and "hello, world!" is printed + } + // ANCHOR_END: hello_world -#[test] -fn run_main() { main() } + #[test] + fn run_main() { + main() + } } struct Song; -async fn learn_song() -> Song { Song } +async fn learn_song() -> Song { + Song +} async fn sing_song(_: Song) {} async fn dance() {} mod second { -use super::*; -// ANCHOR: block_on_each -fn main() { - let song = block_on(learn_song()); - block_on(sing_song(song)); - block_on(dance()); -} -// ANCHOR_END: block_on_each + use super::*; + // ANCHOR: block_on_each + fn main() { + let song = block_on(learn_song()); + block_on(sing_song(song)); + block_on(dance()); + } + // ANCHOR_END: block_on_each -#[test] -fn run_main() { main() } + #[test] + fn run_main() { + main() + } } mod third { -use super::*; -// ANCHOR: block_on_main -async fn learn_and_sing() { - // Wait until the song has been learned before singing it. - // We use `.await` here rather than `block_on` to prevent blocking the - // thread, which makes it possible to `dance` at the same time. - let song = learn_song().await; - sing_song(song).await; -} + use super::*; + // ANCHOR: block_on_main + async fn learn_and_sing() { + // Wait until the song has been learned before singing it. + // We use `.await` here rather than `block_on` to prevent blocking the + // thread, which makes it possible to `dance` at the same time. + let song = learn_song().await; + sing_song(song).await; + } -async fn async_main() { - let f1 = learn_and_sing(); - let f2 = dance(); + async fn async_main() { + let f1 = learn_and_sing(); + let f2 = dance(); - // `join!` is like `.await` but can wait for multiple futures concurrently. - // If we're temporarily blocked in the `learn_and_sing` future, the `dance` - // future will take over the current thread. If `dance` becomes blocked, - // `learn_and_sing` can take back over. If both futures are blocked, then - // `async_main` is blocked and will yield to the executor. 
- futures::join!(f1, f2); -} + // `join!` is like `.await` but can wait for multiple futures concurrently. + // If we're temporarily blocked in the `learn_and_sing` future, the `dance` + // future will take over the current thread. If `dance` becomes blocked, + // `learn_and_sing` can take back over. If both futures are blocked, then + // `async_main` is blocked and will yield to the executor. + futures::join!(f1, f2); + } -fn main() { - block_on(async_main()); -} -// ANCHOR_END: block_on_main + fn main() { + block_on(async_main()); + } + // ANCHOR_END: block_on_main -#[test] -fn run_main() { main() } + #[test] + fn run_main() { + main() + } } diff --git a/examples/02_02_future_trait/src/lib.rs b/examples/02_02_future_trait/src/lib.rs index 79e14d6c..790568c0 100644 --- a/examples/02_02_future_trait/src/lib.rs +++ b/examples/02_02_future_trait/src/lib.rs @@ -135,29 +135,29 @@ where // ANCHOR_END: and_then mod real_future { -use std::{ - future::Future as RealFuture, - pin::Pin, - task::{Context, Poll}, -}; + use std::{ + future::Future as RealFuture, + pin::Pin, + task::{Context, Poll}, + }; -// ANCHOR: real_future -trait Future { - type Output; - fn poll( - // Note the change from `&mut self` to `Pin<&mut Self>`: - self: Pin<&mut Self>, - // and the change from `wake: fn()` to `cx: &mut Context<'_>`: - cx: &mut Context<'_>, - ) -> Poll; -} -// ANCHOR_END: real_future + // ANCHOR: real_future + trait Future { + type Output; + fn poll( + // Note the change from `&mut self` to `Pin<&mut Self>`: + self: Pin<&mut Self>, + // and the change from `wake: fn()` to `cx: &mut Context<'_>`: + cx: &mut Context<'_>, + ) -> Poll; + } + // ANCHOR_END: real_future -// ensure that `Future` matches `RealFuture`: -impl Future for dyn RealFuture { - type Output = O; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - RealFuture::poll(self, cx) + // ensure that `Future` matches `RealFuture`: + impl Future for dyn RealFuture { + type Output = O; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + RealFuture::poll(self, cx) + } } } -} diff --git a/examples/02_03_timer/src/lib.rs b/examples/02_03_timer/src/lib.rs index fc828949..ba98a369 100644 --- a/examples/02_03_timer/src/lib.rs +++ b/examples/02_03_timer/src/lib.rs @@ -1,13 +1,11 @@ // ANCHOR: imports -use { - std::{ - future::Future, - pin::Pin, - sync::{Arc, Mutex}, - task::{Context, Poll, Waker}, - thread, - time::Duration, - }, +use std::{ + future::Future, + pin::Pin, + sync::{Arc, Mutex}, + task::{Context, Poll, Waker}, + thread, + time::Duration, }; // ANCHOR_END: imports @@ -87,7 +85,5 @@ impl TimerFuture { #[test] fn block_on_timer() { - futures::executor::block_on(async { - TimerFuture::new(Duration::from_secs(1)).await - }) + futures::executor::block_on(async { TimerFuture::new(Duration::from_secs(1)).await }) } diff --git a/examples/02_04_executor/src/lib.rs b/examples/02_04_executor/src/lib.rs index 58ed7b7a..e5120cd9 100644 --- a/examples/02_04_executor/src/lib.rs +++ b/examples/02_04_executor/src/lib.rs @@ -3,13 +3,13 @@ // ANCHOR: imports use { futures::{ - future::{FutureExt, BoxFuture}, - task::{ArcWake, waker_ref}, + future::{BoxFuture, FutureExt}, + task::{waker_ref, ArcWake}, }, std::{ future::Future, + sync::mpsc::{sync_channel, Receiver, SyncSender}, sync::{Arc, Mutex}, - sync::mpsc::{sync_channel, SyncSender, Receiver}, task::{Context, Poll}, time::Duration, }, @@ -74,7 +74,10 @@ impl ArcWake for Task { // Implement `wake` by sending this task back onto the task channel // so that it will be polled again by 
the executor. let cloned = arc_self.clone(); - arc_self.task_sender.send(cloned).expect("too many tasks queued"); + arc_self + .task_sender + .send(cloned) + .expect("too many tasks queued"); } } // ANCHOR_END: arcwake_for_task @@ -128,4 +131,6 @@ fn main() { // ANCHOR_END: main #[test] -fn run_main() { main() } +fn run_main() { + main() +} diff --git a/examples/03_01_async_await/src/lib.rs b/examples/03_01_async_await/src/lib.rs index 9510a7d2..d145f4f6 100644 --- a/examples/03_01_async_await/src/lib.rs +++ b/examples/03_01_async_await/src/lib.rs @@ -2,90 +2,96 @@ #![cfg(test)] mod async_fn_and_block_examples { -use std::future::Future; -// ANCHOR: async_fn_and_block_examples + use std::future::Future; + // ANCHOR: async_fn_and_block_examples -// `foo()` returns a type that implements `Future`. -// `foo().await` will result in a value of type `u8`. -async fn foo() -> u8 { 5 } + // `foo()` returns a type that implements `Future`. + // `foo().await` will result in a value of type `u8`. + async fn foo() -> u8 { + 5 + } -fn bar() -> impl Future { - // This `async` block results in a type that implements - // `Future`. - async { - let x: u8 = foo().await; - x + 5 + fn bar() -> impl Future { + // This `async` block results in a type that implements + // `Future`. + async { + let x: u8 = foo().await; + x + 5 + } } -} -// ANCHOR_END: async_fn_and_block_examples + // ANCHOR_END: async_fn_and_block_examples } mod async_lifetimes_examples { -use std::future::Future; -// ANCHOR: lifetimes_expanded -// This function: -async fn foo(x: &u8) -> u8 { *x } - -// Is equivalent to this function: -fn foo_expanded<'a>(x: &'a u8) -> impl Future + 'a { - async move { *x } -} -// ANCHOR_END: lifetimes_expanded + use std::future::Future; + // ANCHOR: lifetimes_expanded + // This function: + async fn foo(x: &u8) -> u8 { + *x + } -async fn borrow_x(x: &u8) -> u8 { *x } + // Is equivalent to this function: + fn foo_expanded<'a>(x: &'a u8) -> impl Future + 'a { + async move { *x } + } + // ANCHOR_END: lifetimes_expanded -#[cfg(feature = "never_compiled")] -// ANCHOR: static_future_with_borrow -fn bad() -> impl Future { - let x = 5; - borrow_x(&x) // ERROR: `x` does not live long enough -} + async fn borrow_x(x: &u8) -> u8 { + *x + } -fn good() -> impl Future { - async { + #[cfg(feature = "never_compiled")] + // ANCHOR: static_future_with_borrow + fn bad() -> impl Future { let x = 5; - borrow_x(&x).await + borrow_x(&x) // ERROR: `x` does not live long enough } -} -// ANCHOR_END: static_future_with_borrow + + fn good() -> impl Future { + async { + let x = 5; + borrow_x(&x).await + } + } + // ANCHOR_END: static_future_with_borrow } mod async_move_examples { -use std::future::Future; -// ANCHOR: async_move_examples -/// `async` block: -/// -/// Multiple different `async` blocks can access the same local variable -/// so long as they're executed within the variable's scope -async fn blocks() { - let my_string = "foo".to_string(); + use std::future::Future; + // ANCHOR: async_move_examples + /// `async` block: + /// + /// Multiple different `async` blocks can access the same local variable + /// so long as they're executed within the variable's scope + async fn blocks() { + let my_string = "foo".to_string(); - let future_one = async { - // ... - println!("{}", my_string); - }; + let future_one = async { + // ... + println!("{}", my_string); + }; - let future_two = async { - // ... - println!("{}", my_string); - }; + let future_two = async { + // ... 
+ println!("{}", my_string); + }; - // Run both futures to completion, printing "foo" twice: - let ((), ()) = futures::join!(future_one, future_two); -} + // Run both futures to completion, printing "foo" twice: + let ((), ()) = futures::join!(future_one, future_two); + } -/// `async move` block: -/// -/// Only one `async move` block can access the same captured variable, since -/// captures are moved into the `Future` generated by the `async move` block. -/// However, this allows the `Future` to outlive the original scope of the -/// variable: -fn move_block() -> impl Future { - let my_string = "foo".to_string(); - async move { - // ... - println!("{}", my_string); + /// `async move` block: + /// + /// Only one `async move` block can access the same captured variable, since + /// captures are moved into the `Future` generated by the `async move` block. + /// However, this allows the `Future` to outlive the original scope of the + /// variable: + fn move_block() -> impl Future { + let my_string = "foo".to_string(); + async move { + // ... + println!("{}", my_string); + } } -} -// ANCHOR_END: async_move_examples + // ANCHOR_END: async_move_examples } diff --git a/examples/05_01_streams/src/lib.rs b/examples/05_01_streams/src/lib.rs index d68fc7e3..09261889 100644 --- a/examples/05_01_streams/src/lib.rs +++ b/examples/05_01_streams/src/lib.rs @@ -1,63 +1,57 @@ #![cfg(test)] mod stream_trait { -use { - futures::stream::{Stream as RealStream}, - std::{ - pin::Pin, - task::{Context, Poll}, - }, -}; - -// ANCHOR: stream_trait -trait Stream { - /// The type of the value yielded by the stream. - type Item; - - /// Attempt to resolve the next item in the stream. - /// Returns `Poll::Pending` if not ready, `Poll::Ready(Some(x))` if a value - /// is ready, and `Poll::Ready(None)` if the stream has completed. - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) - -> Poll>; -} -// ANCHOR_END: stream_trait - -// assert that `Stream` matches `RealStream`: -impl Stream for dyn RealStream { - type Item = I; - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) - -> Poll> - { - RealStream::poll_next(self, cx) + use { + futures::stream::Stream as RealStream, + std::{ + pin::Pin, + task::{Context, Poll}, + }, + }; + + // ANCHOR: stream_trait + trait Stream { + /// The type of the value yielded by the stream. + type Item; + + /// Attempt to resolve the next item in the stream. + /// Returns `Poll::Pending` if not ready, `Poll::Ready(Some(x))` if a value + /// is ready, and `Poll::Ready(None)` if the stream has completed. + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; + } + // ANCHOR_END: stream_trait + + // assert that `Stream` matches `RealStream`: + impl Stream for dyn RealStream { + type Item = I; + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + RealStream::poll_next(self, cx) + } } -} } mod channels { -use { - futures::{ - channel::mpsc, - prelude::*, - }, -}; - -// ANCHOR: channels -async fn send_recv() { - const BUFFER_SIZE: usize = 10; - let (mut tx, mut rx) = mpsc::channel::(BUFFER_SIZE); - - tx.send(1).await.unwrap(); - tx.send(2).await.unwrap(); - drop(tx); - - // `StreamExt::next` is similar to `Iterator::next`, but returns a - // type that implements `Future>`. 
- assert_eq!(Some(1), rx.next().await); - assert_eq!(Some(2), rx.next().await); - assert_eq!(None, rx.next().await); -} -// ANCHOR_END: channels + use futures::{channel::mpsc, prelude::*}; + + // ANCHOR: channels + async fn send_recv() { + const BUFFER_SIZE: usize = 10; + let (mut tx, mut rx) = mpsc::channel::(BUFFER_SIZE); + + tx.send(1).await.unwrap(); + tx.send(2).await.unwrap(); + drop(tx); + + // `StreamExt::next` is similar to `Iterator::next`, but returns a + // type that implements `Future>`. + assert_eq!(Some(1), rx.next().await); + assert_eq!(Some(2), rx.next().await); + assert_eq!(None, rx.next().await); + } + // ANCHOR_END: channels -#[test] -fn run_send_recv() { futures::executor::block_on(send_recv()) } + #[test] + fn run_send_recv() { + futures::executor::block_on(send_recv()) + } } diff --git a/examples/05_02_iteration_and_concurrency/src/lib.rs b/examples/05_02_iteration_and_concurrency/src/lib.rs index 6eb36a14..18f2f93a 100644 --- a/examples/05_02_iteration_and_concurrency/src/lib.rs +++ b/examples/05_02_iteration_and_concurrency/src/lib.rs @@ -5,10 +5,7 @@ use { executor::block_on, stream::{self, Stream}, }, - std::{ - io, - pin::Pin, - }, + std::{io, pin::Pin}, }; // ANCHOR: nexts @@ -55,15 +52,21 @@ async fn jump_around( use futures::stream::TryStreamExt; // for `try_for_each_concurrent` const MAX_CONCURRENT_JUMPERS: usize = 100; - stream.try_for_each_concurrent(MAX_CONCURRENT_JUMPERS, |num| async move { - jump_n_times(num).await?; - report_n_jumps(num).await?; - Ok(()) - }).await?; + stream + .try_for_each_concurrent(MAX_CONCURRENT_JUMPERS, |num| async move { + jump_n_times(num).await?; + report_n_jumps(num).await?; + Ok(()) + }) + .await?; Ok(()) } // ANCHOR_END: try_for_each_concurrent -async fn jump_n_times(_: u8) -> Result<(), io::Error> { Ok(()) } -async fn report_n_jumps(_: u8) -> Result<(), io::Error> { Ok(()) } +async fn jump_n_times(_: u8) -> Result<(), io::Error> { + Ok(()) +} +async fn report_n_jumps(_: u8) -> Result<(), io::Error> { + Ok(()) +} diff --git a/examples/06_02_join/src/lib.rs b/examples/06_02_join/src/lib.rs index b2cd8719..5461db23 100644 --- a/examples/06_02_join/src/lib.rs +++ b/examples/06_02_join/src/lib.rs @@ -2,76 +2,89 @@ struct Book; struct Music; -async fn get_book() -> Book { Book } -async fn get_music() -> Music { Music } +async fn get_book() -> Book { + Book +} +async fn get_music() -> Music { + Music +} mod naiive { -use super::*; -// ANCHOR: naiive -async fn get_book_and_music() -> (Book, Music) { - let book = get_book().await; - let music = get_music().await; - (book, music) -} -// ANCHOR_END: naiive + use super::*; + // ANCHOR: naiive + async fn get_book_and_music() -> (Book, Music) { + let book = get_book().await; + let music = get_music().await; + (book, music) + } + // ANCHOR_END: naiive } mod other_langs { -use super::*; -// ANCHOR: other_langs -// WRONG -- don't do this -async fn get_book_and_music() -> (Book, Music) { - let book_future = get_book(); - let music_future = get_music(); - (book_future.await, music_future.await) -} -// ANCHOR_END: other_langs + use super::*; + // ANCHOR: other_langs + // WRONG -- don't do this + async fn get_book_and_music() -> (Book, Music) { + let book_future = get_book(); + let music_future = get_music(); + (book_future.await, music_future.await) + } + // ANCHOR_END: other_langs } mod join { -use super::*; -// ANCHOR: join -use futures::join; + use super::*; + // ANCHOR: join + use futures::join; -async fn get_book_and_music() -> (Book, Music) { - let book_fut = get_book(); - let music_fut 
= get_music(); - join!(book_fut, music_fut) -} -// ANCHOR_END: join + async fn get_book_and_music() -> (Book, Music) { + let book_fut = get_book(); + let music_fut = get_music(); + join!(book_fut, music_fut) + } + // ANCHOR_END: join } mod try_join { -use super::{Book, Music}; -// ANCHOR: try_join -use futures::try_join; + use super::{Book, Music}; + // ANCHOR: try_join + use futures::try_join; -async fn get_book() -> Result { /* ... */ Ok(Book) } -async fn get_music() -> Result { /* ... */ Ok(Music) } + async fn get_book() -> Result { + /* ... */ + Ok(Book) + } + async fn get_music() -> Result { + /* ... */ + Ok(Music) + } -async fn get_book_and_music() -> Result<(Book, Music), String> { - let book_fut = get_book(); - let music_fut = get_music(); - try_join!(book_fut, music_fut) -} -// ANCHOR_END: try_join + async fn get_book_and_music() -> Result<(Book, Music), String> { + let book_fut = get_book(); + let music_fut = get_music(); + try_join!(book_fut, music_fut) + } + // ANCHOR_END: try_join } mod mismatched_err { -use super::{Book, Music}; -// ANCHOR: try_join_map_err -use futures::{ - future::TryFutureExt, - try_join, -}; + use super::{Book, Music}; + // ANCHOR: try_join_map_err + use futures::{future::TryFutureExt, try_join}; -async fn get_book() -> Result { /* ... */ Ok(Book) } -async fn get_music() -> Result { /* ... */ Ok(Music) } + async fn get_book() -> Result { + /* ... */ + Ok(Book) + } + async fn get_music() -> Result { + /* ... */ + Ok(Music) + } -async fn get_book_and_music() -> Result<(Book, Music), String> { - let book_fut = get_book().map_err(|()| "Unable to get book".to_string()); - let music_fut = get_music(); - try_join!(book_fut, music_fut) -} -// ANCHOR_END: try_join_map_err + async fn get_book_and_music() -> Result<(Book, Music), String> { + let book_fut = get_book().map_err(|()| "Unable to get book".to_string()); + let music_fut = get_music(); + try_join!(book_fut, music_fut) + } + // ANCHOR_END: try_join_map_err } diff --git a/examples/06_03_select/src/lib.rs b/examples/06_03_select/src/lib.rs index 7d79d439..5a4b7dba 100644 --- a/examples/06_03_select/src/lib.rs +++ b/examples/06_03_select/src/lib.rs @@ -1,183 +1,193 @@ #![cfg(test)] -#![recursion_limit="128"] +#![recursion_limit = "128"] mod example { -// ANCHOR: example -use futures::{ - future::FutureExt, // for `.fuse()` - pin_mut, - select, -}; - -async fn task_one() { /* ... */ } -async fn task_two() { /* ... */ } + // ANCHOR: example + use futures::{ + future::FutureExt, // for `.fuse()` + pin_mut, + select, + }; + + async fn task_one() { /* ... */ + } + async fn task_two() { /* ... */ + } -async fn race_tasks() { - let t1 = task_one().fuse(); - let t2 = task_two().fuse(); + async fn race_tasks() { + let t1 = task_one().fuse(); + let t2 = task_two().fuse(); - pin_mut!(t1, t2); + pin_mut!(t1, t2); - select! { - () = t1 => println!("task one completed first"), - () = t2 => println!("task two completed first"), + select! { + () = t1 => println!("task one completed first"), + () = t2 => println!("task two completed first"), + } } -} -// ANCHOR_END: example + // ANCHOR_END: example } mod default_and_complete { -// ANCHOR: default_and_complete -use futures::{future, select}; - -async fn count() { - let mut a_fut = future::ready(4); - let mut b_fut = future::ready(6); - let mut total = 0; - - loop { - select! 
{ - a = a_fut => total += a, - b = b_fut => total += b, - complete => break, - default => unreachable!(), // never runs (futures are ready, then complete) - }; + // ANCHOR: default_and_complete + use futures::{future, select}; + + async fn count() { + let mut a_fut = future::ready(4); + let mut b_fut = future::ready(6); + let mut total = 0; + + loop { + select! { + a = a_fut => total += a, + b = b_fut => total += b, + complete => break, + default => unreachable!(), // never runs (futures are ready, then complete) + }; + } + assert_eq!(total, 10); } - assert_eq!(total, 10); -} -// ANCHOR_END: default_and_complete + // ANCHOR_END: default_and_complete -#[test] -fn run_count() { - futures::executor::block_on(count()); -} + #[test] + fn run_count() { + futures::executor::block_on(count()); + } } mod fused_stream { -// ANCHOR: fused_stream -use futures::{ - stream::{Stream, StreamExt, FusedStream}, - select, -}; - -async fn add_two_streams( - mut s1: impl Stream + FusedStream + Unpin, - mut s2: impl Stream + FusedStream + Unpin, -) -> u8 { - let mut total = 0; - - loop { - let item = select! { - x = s1.next() => x, - x = s2.next() => x, - complete => break, - }; - if let Some(next_num) = item { - total += next_num; + // ANCHOR: fused_stream + use futures::{ + select, + stream::{FusedStream, Stream, StreamExt}, + }; + + async fn add_two_streams( + mut s1: impl Stream + FusedStream + Unpin, + mut s2: impl Stream + FusedStream + Unpin, + ) -> u8 { + let mut total = 0; + + loop { + let item = select! { + x = s1.next() => x, + x = s2.next() => x, + complete => break, + }; + if let Some(next_num) = item { + total += next_num; + } } - } - total -} -// ANCHOR_END: fused_stream + total + } + // ANCHOR_END: fused_stream } mod fuse_terminated { -// ANCHOR: fuse_terminated -use futures::{ - future::{Fuse, FusedFuture, FutureExt}, - stream::{FusedStream, Stream, StreamExt}, - pin_mut, - select, -}; - -async fn get_new_num() -> u8 { /* ... */ 5 } - -async fn run_on_new_num(_: u8) { /* ... */ } - -async fn run_loop( - mut interval_timer: impl Stream + FusedStream + Unpin, - starting_num: u8, -) { - let run_on_new_num_fut = run_on_new_num(starting_num).fuse(); - let get_new_num_fut = Fuse::terminated(); - pin_mut!(run_on_new_num_fut, get_new_num_fut); - loop { - select! { - () = interval_timer.select_next_some() => { - // The timer has elapsed. Start a new `get_new_num_fut` - // if one was not already running. - if get_new_num_fut.is_terminated() { - get_new_num_fut.set(get_new_num().fuse()); - } - }, - new_num = get_new_num_fut => { - // A new number has arrived-- start a new `run_on_new_num_fut`, - // dropping the old one. - run_on_new_num_fut.set(run_on_new_num(new_num).fuse()); - }, - // Run the `run_on_new_num_fut` - () = run_on_new_num_fut => {}, - // panic if everything completed, since the `interval_timer` should - // keep yielding values indefinitely. - complete => panic!("`interval_timer` completed unexpectedly"), + // ANCHOR: fuse_terminated + use futures::{ + future::{Fuse, FusedFuture, FutureExt}, + pin_mut, select, + stream::{FusedStream, Stream, StreamExt}, + }; + + async fn get_new_num() -> u8 { + /* ... */ + 5 + } + + async fn run_on_new_num(_: u8) { /* ... */ + } + + async fn run_loop( + mut interval_timer: impl Stream + FusedStream + Unpin, + starting_num: u8, + ) { + let run_on_new_num_fut = run_on_new_num(starting_num).fuse(); + let get_new_num_fut = Fuse::terminated(); + pin_mut!(run_on_new_num_fut, get_new_num_fut); + loop { + select! 
{ + () = interval_timer.select_next_some() => { + // The timer has elapsed. Start a new `get_new_num_fut` + // if one was not already running. + if get_new_num_fut.is_terminated() { + get_new_num_fut.set(get_new_num().fuse()); + } + }, + new_num = get_new_num_fut => { + // A new number has arrived-- start a new `run_on_new_num_fut`, + // dropping the old one. + run_on_new_num_fut.set(run_on_new_num(new_num).fuse()); + }, + // Run the `run_on_new_num_fut` + () = run_on_new_num_fut => {}, + // panic if everything completed, since the `interval_timer` should + // keep yielding values indefinitely. + complete => panic!("`interval_timer` completed unexpectedly"), + } } } -} -// ANCHOR_END: fuse_terminated + // ANCHOR_END: fuse_terminated } mod futures_unordered { -// ANCHOR: futures_unordered -use futures::{ - future::{Fuse, FusedFuture, FutureExt}, - stream::{FusedStream, FuturesUnordered, Stream, StreamExt}, - pin_mut, - select, -}; - -async fn get_new_num() -> u8 { /* ... */ 5 } - -async fn run_on_new_num(_: u8) -> u8 { /* ... */ 5 } - -// Runs `run_on_new_num` with the latest number -// retrieved from `get_new_num`. -// -// `get_new_num` is re-run every time a timer elapses, -// immediately cancelling the currently running -// `run_on_new_num` and replacing it with the newly -// returned value. -async fn run_loop( - mut interval_timer: impl Stream + FusedStream + Unpin, - starting_num: u8, -) { - let mut run_on_new_num_futs = FuturesUnordered::new(); - run_on_new_num_futs.push(run_on_new_num(starting_num)); - let get_new_num_fut = Fuse::terminated(); - pin_mut!(get_new_num_fut); - loop { - select! { - () = interval_timer.select_next_some() => { - // The timer has elapsed. Start a new `get_new_num_fut` - // if one was not already running. - if get_new_num_fut.is_terminated() { - get_new_num_fut.set(get_new_num().fuse()); - } - }, - new_num = get_new_num_fut => { - // A new number has arrived-- start a new `run_on_new_num_fut`. - run_on_new_num_futs.push(run_on_new_num(new_num)); - }, - // Run the `run_on_new_num_futs` and check if any have completed - res = run_on_new_num_futs.select_next_some() => { - println!("run_on_new_num_fut returned {:?}", res); - }, - // panic if everything completed, since the `interval_timer` should - // keep yielding values indefinitely. - complete => panic!("`interval_timer` completed unexpectedly"), + // ANCHOR: futures_unordered + use futures::{ + future::{Fuse, FusedFuture, FutureExt}, + pin_mut, select, + stream::{FusedStream, FuturesUnordered, Stream, StreamExt}, + }; + + async fn get_new_num() -> u8 { + /* ... */ + 5 + } + + async fn run_on_new_num(_: u8) -> u8 { + /* ... */ + 5 + } + + // Runs `run_on_new_num` with the latest number + // retrieved from `get_new_num`. + // + // `get_new_num` is re-run every time a timer elapses, + // immediately cancelling the currently running + // `run_on_new_num` and replacing it with the newly + // returned value. + async fn run_loop( + mut interval_timer: impl Stream + FusedStream + Unpin, + starting_num: u8, + ) { + let mut run_on_new_num_futs = FuturesUnordered::new(); + run_on_new_num_futs.push(run_on_new_num(starting_num)); + let get_new_num_fut = Fuse::terminated(); + pin_mut!(get_new_num_fut); + loop { + select! { + () = interval_timer.select_next_some() => { + // The timer has elapsed. Start a new `get_new_num_fut` + // if one was not already running. 
+ if get_new_num_fut.is_terminated() { + get_new_num_fut.set(get_new_num().fuse()); + } + }, + new_num = get_new_num_fut => { + // A new number has arrived-- start a new `run_on_new_num_fut`. + run_on_new_num_futs.push(run_on_new_num(new_num)); + }, + // Run the `run_on_new_num_futs` and check if any have completed + res = run_on_new_num_futs.select_next_some() => { + println!("run_on_new_num_fut returned {:?}", res); + }, + // panic if everything completed, since the `interval_timer` should + // keep yielding values indefinitely. + complete => panic!("`interval_timer` completed unexpectedly"), + } } } -} -// ANCHOR_END: futures_unordered + // ANCHOR_END: futures_unordered } diff --git a/examples/07_05_recursion/src/lib.rs b/examples/07_05_recursion/src/lib.rs index 7811cfb3..10e8c8b4 100644 --- a/examples/07_05_recursion/src/lib.rs +++ b/examples/07_05_recursion/src/lib.rs @@ -8,6 +8,7 @@ fn recursive() -> BoxFuture<'static, ()> { async move { recursive().await; recursive().await; - }.boxed() + } + .boxed() } // ANCHOR_END: example diff --git a/examples/08_01_sync_tcp_server/src/main.rs b/examples/08_01_sync_tcp_server/src/main.rs index 8af6b4a1..93bd410d 100644 --- a/examples/08_01_sync_tcp_server/src/main.rs +++ b/examples/08_01_sync_tcp_server/src/main.rs @@ -36,4 +36,4 @@ fn handle_connection(mut stream: TcpStream) { let response = format!("{}{}", status_line, contents); stream.write(response.as_bytes()).unwrap(); stream.flush().unwrap(); -} \ No newline at end of file +} diff --git a/examples/08_02_async_tcp_server/Cargo.toml b/examples/08_02_async_tcp_server/Cargo.toml index d471d262..196d1a58 100644 --- a/examples/08_02_async_tcp_server/Cargo.toml +++ b/examples/08_02_async_tcp_server/Cargo.toml @@ -6,5 +6,6 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html -[dependencies] -async-std = "1.6" +[dependencies.async-std] +version = "1.6" +features = ["attributes"] diff --git a/examples/08_02_async_tcp_server/src/main.rs b/examples/08_02_async_tcp_server/src/main.rs index 777b83d9..c914c36d 100644 --- a/examples/08_02_async_tcp_server/src/main.rs +++ b/examples/08_02_async_tcp_server/src/main.rs @@ -1,18 +1,15 @@ use std::net::TcpListener; use std::net::TcpStream; -use async_std::task::block_on; - // ANCHOR: main_func -fn main() { +#[async_std::main] +async fn main() { let listener = TcpListener::bind("127.0.0.1:7878").unwrap(); - block_on(async { - for stream in listener.incoming() { - let stream = stream.unwrap(); - // Warning: This is not concurrent! - handle_connection(stream).await; - } - }) + for stream in listener.incoming() { + let stream = stream.unwrap(); + // Warning: This is not concurrent! 
+ handle_connection(stream).await; + } } // ANCHOR_END: main_func diff --git a/examples/08_03_concurrent_tcp_server/Cargo.toml b/examples/08_03_concurrent_tcp_server/Cargo.toml index 07b882f5..a34d5db0 100644 --- a/examples/08_03_concurrent_tcp_server/Cargo.toml +++ b/examples/08_03_concurrent_tcp_server/Cargo.toml @@ -6,5 +6,6 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html -[dependencies] -async-std = "1.6" +[dependencies.async-std] +version = "1.6" +features = ["attributes"] \ No newline at end of file diff --git a/examples/08_03_concurrent_tcp_server/src/main.rs b/examples/08_03_concurrent_tcp_server/src/main.rs index bd522f8a..56f9b2d5 100644 --- a/examples/08_03_concurrent_tcp_server/src/main.rs +++ b/examples/08_03_concurrent_tcp_server/src/main.rs @@ -2,16 +2,15 @@ use std::net::TcpListener; use std::net::TcpStream; // ANCHOR: main_func -use async_std::task::{block_on, spawn}; +use async_std::task::spawn; -fn main() { +#[async_std::main] +async fn main() { let listener = TcpListener::bind("127.0.0.1:7878").unwrap(); - block_on(async { - for stream in listener.incoming() { - let stream = stream.unwrap(); - spawn(async {handle_connection(stream).await} ); - } - }) + for stream in listener.incoming() { + let stream = stream.unwrap(); + spawn(handle_connection(stream)); + } } // ANCHOR_END: main_func diff --git a/examples/08_04_nonblocking_tcp_server/Cargo.toml b/examples/08_04_nonblocking_tcp_server/Cargo.toml index 276c4d92..9a4e7b97 100644 --- a/examples/08_04_nonblocking_tcp_server/Cargo.toml +++ b/examples/08_04_nonblocking_tcp_server/Cargo.toml @@ -6,5 +6,6 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html -[dependencies] -async-std = "1.6" \ No newline at end of file +[dependencies.async-std] +version = "1.6" +features = ["attributes"] \ No newline at end of file diff --git a/examples/08_04_nonblocking_tcp_server/src/main.rs b/examples/08_04_nonblocking_tcp_server/src/main.rs index 0751f60c..8cc6735e 100644 --- a/examples/08_04_nonblocking_tcp_server/src/main.rs +++ b/examples/08_04_nonblocking_tcp_server/src/main.rs @@ -1,16 +1,15 @@ // ANCHOR: main_func use async_std::net::{TcpListener, TcpStream}; -use async_std::task::{block_on, spawn}; +use async_std::task::spawn; -fn main() { - block_on(async { - let listener = TcpListener::bind("127.0.0.1:7878").await.unwrap(); +#[async_std::main] +async fn main() { + let listener = TcpListener::bind("127.0.0.1:7878").await.unwrap(); - loop { - let (stream, _) = listener.accept().await.unwrap(); - spawn(handle_connection(stream)); - } - }) + loop { + let (stream, _) = listener.accept().await.unwrap(); + spawn(handle_connection(stream)); + } } // ANCHOR_END: main_func diff --git a/examples/08_05_final_tcp_server/Cargo.toml b/examples/08_05_final_tcp_server/Cargo.toml index 7b1e8e00..0bf0e9c9 100644 --- a/examples/08_05_final_tcp_server/Cargo.toml +++ b/examples/08_05_final_tcp_server/Cargo.toml @@ -7,5 +7,8 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-std = "1.6" -futures = "0.3" \ No newline at end of file +futures = "0.3" + +[dependencies.async-std] +version = "1.6" +features = ["attributes"] \ No newline at end of file diff --git a/examples/08_05_final_tcp_server/src/main.rs b/examples/08_05_final_tcp_server/src/main.rs index 97c3ed84..32793d9c 100644 --- a/examples/08_05_final_tcp_server/src/main.rs +++ 
b/examples/08_05_final_tcp_server/src/main.rs @@ -5,17 +5,16 @@ use futures::join; use async_std::net::TcpListener; use async_std::prelude::*; -use async_std::task::{block_on, spawn}; +use async_std::task::spawn; -fn main() { - block_on(async { - let listener = TcpListener::bind("127.0.0.1:7878").await.unwrap(); +#[async_std::main] +async fn main() { + let listener = TcpListener::bind("127.0.0.1:7878").await.unwrap(); - loop { - let (stream, _) = listener.accept().await.unwrap(); - spawn(handle_connection(stream)); - } - }) + loop { + let (stream, _) = listener.accept().await.unwrap(); + spawn(handle_connection(stream)); + } } use async_std::io::{Read, Write}; @@ -129,8 +128,8 @@ mod tests { // ANCHOR: test use std::fs; - #[test] - fn test_handle_connection() { + #[async_std::test] + async fn test_handle_connection() { let input_bytes = b"GET / HTTP/1.1\r\n"; let mut contents = vec![0u8; 1024]; contents[..input_bytes.len()].clone_from_slice(input_bytes); @@ -139,11 +138,9 @@ mod tests { write_data: Vec::new(), }; - block_on(async { - handle_connection(&mut stream).await; - let mut buf = [0u8; 1024]; - stream.read(&mut buf).await.unwrap(); - }); + handle_connection(&mut stream).await; + let mut buf = [0u8; 1024]; + stream.read(&mut buf).await.unwrap(); let expected_contents = fs::read_to_string("hello.html").unwrap(); let expected_response = format!("HTTP/1.1 200 OK\r\n\r\n{}", expected_contents); diff --git a/src/08_example/00_intro.md b/src/08_example/00_intro.md index 142b9a45..bc776f0c 100644 --- a/src/08_example/00_intro.md +++ b/src/08_example/00_intro.md @@ -1,7 +1,9 @@ -# Applied: Building a Concurrent Web Server with Async Rust +# Final Project: Building a Concurrent Web Server with Async Rust In this chapter, we'll use asynchronous Rust to modify the Rust book's [single-threaded web server](https://doc.rust-lang.org/book/ch20-01-single-threaded.html) to serve requests concurrently. + +## Recap Here's what the code looked like at the end of the lesson. `src/main.rs`: diff --git a/src/08_example/01_running_async_code.md b/src/08_example/01_running_async_code.md index b67944f4..28f1b927 100644 --- a/src/08_example/01_running_async_code.md +++ b/src/08_example/01_running_async_code.md @@ -1,9 +1,8 @@ # Running Asynchronous Code -As [the book explains](https://doc.rust-lang.org/book/ch20-02-multithreaded.html#turning-our-single-threaded-server-into-a-multithreaded-server), -we don't want our web server to wait for each request to finish before handling the next, -as some requests could be very slow. -Instead of improving throughput by adding threads, -we'll use asynchronous code to process requests concurrently. +An HTTP server should be able to serve multiple clients concurrently. +Waiting for previous requests to complete before handling the current request can cause performance to suffer. +The book [solves this problem](https://doc.rust-lang.org/book/ch20-02-multithreaded.html#turning-our-single-threaded-server-into-a-multithreaded-server) by creating a thread pool where each connection is handled on its own thread. +Here, instead of improving throughput by adding threads, we'll achieve the same effect using asynchronous code. Let's modify `handle_connection` to return a future by declaring it an `async fn`: ```rust,ignore @@ -32,9 +31,19 @@ it'll never run. If you run the server and visit `127.0.0.1:7878` in a browser, you'll see that the connection is refused; our server is not handling requests. We can't `await` or `poll` futures within synchronous code by itself. 
-We'll need an executor to handle scheduling and running futures to completion. -Please consult the section [Choosing an Executor](../404.md) for more information on executors. -Here, we'll use the `block_on` executor from the `async_std` crate. +We'll need an asynchronous runtime to handle scheduling and running futures to completion. +Please consult the section on choosing a runtime for more information on asynchronous runtimes, executors, and reactors. + +[//]: <> (TODO: Link to section on runtimes once complete.) + +Here, we'll use an executor from the `async-std` crate. +The `#[async_std::main]` attribute from `async-std` allows us to write an asynchronous main function. +To use it, enable the `attributes` feature of `async-std` in `Cargo.toml`: +```toml +[dependencies.async-std] +version = "1.6" +features = ["attributes"] +``` It might be tempting to write something like this: ```rust diff --git a/src/08_example/04_tests.md b/src/08_example/04_tests.md index 6e2a5240..eb71f55c 100644 --- a/src/08_example/04_tests.md +++ b/src/08_example/04_tests.md @@ -1,4 +1,4 @@ -# Testing Async Code +# Testing the TCP Server Let's move on to testing our `handle_connection` function. First, we need a `TcpStream` to work with, but we don't want to make a real TCP connection in test code. We could work around this in a few ways. @@ -45,7 +45,7 @@ For more information on pinning and the `Unpin` trait, see the [section on pinni Now we're ready to test the `handle_connection` function. After setting up the `MockTcpStream` containing some initial data, -we can run `handle_connection` using `async_std::task::block_on`, exactly as we did in the main method. +we can run `handle_connection` using the attribute `#[async_std::test]`, similarly to how we used `#[async_std::main]`. To ensure that `handle_connection` works as intended, we'll check that the correct data was written to the `MockTcpStream` based on its initial contents. 
```rust,ignore diff --git a/src/SUMMARY.md b/src/SUMMARY.md index bbdf925a..282f55f1 100644 --- a/src/SUMMARY.md +++ b/src/SUMMARY.md @@ -25,7 +25,7 @@ - [`Send` Approximation](07_workarounds/04_send_approximation.md) - [Recursion](07_workarounds/05_recursion.md) - [`async` in Traits](07_workarounds/06_async_in_traits.md) -- [Applied: HTTP Server](08_example/00_intro.md) +- [Final Project: HTTP Server](08_example/00_intro.md) - [Running Asynchronous Code](08_example/01_running_async_code.md) - [Handling Connections Concurrently](08_example/02_handling_connections_concurrently.md) - [Multiple Actions Per Request](08_example/03_combinators.md) From fb5de20581278c55fb421571360c4a6f4d43bee0 Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Fri, 4 Sep 2020 18:12:15 -0400 Subject: [PATCH 05/12] Revert formatting changes to other examples --- examples/01_02_why_async/src/lib.rs | 5 +- examples/01_04_async_await_primer/src/lib.rs | 110 +++--- examples/02_02_future_trait/src/lib.rs | 44 +-- examples/02_03_timer/src/lib.rs | 20 +- examples/03_01_async_await/src/lib.rs | 140 ++++---- examples/05_01_streams/src/lib.rs | 102 +++--- .../src/lib.rs | 25 +- examples/06_02_join/src/lib.rs | 119 +++---- examples/06_03_select/src/lib.rs | 318 +++++++++--------- examples/07_05_recursion/src/lib.rs | 3 +- 10 files changed, 429 insertions(+), 457 deletions(-) diff --git a/examples/01_02_why_async/src/lib.rs b/examples/01_02_why_async/src/lib.rs index a43f7a8f..64758e4f 100644 --- a/examples/01_02_why_async/src/lib.rs +++ b/examples/01_02_why_async/src/lib.rs @@ -1,7 +1,10 @@ #![cfg(test)] use { - futures::{executor::block_on, join}, + futures::{ + executor::block_on, + join, + }, std::thread, }; diff --git a/examples/01_04_async_await_primer/src/lib.rs b/examples/01_04_async_await_primer/src/lib.rs index 4425e22e..1a2c14eb 100644 --- a/examples/01_04_async_await_primer/src/lib.rs +++ b/examples/01_04_async_await_primer/src/lib.rs @@ -3,81 +3,73 @@ use futures::executor::block_on; mod first { - // ANCHOR: hello_world - // `block_on` blocks the current thread until the provided future has run to - // completion. Other executors provide more complex behavior, like scheduling - // multiple futures onto the same thread. - use futures::executor::block_on; +// ANCHOR: hello_world +// `block_on` blocks the current thread until the provided future has run to +// completion. Other executors provide more complex behavior, like scheduling +// multiple futures onto the same thread. +use futures::executor::block_on; - async fn hello_world() { - println!("hello, world!"); - } +async fn hello_world() { + println!("hello, world!"); +} - fn main() { - let future = hello_world(); // Nothing is printed - block_on(future); // `future` is run and "hello, world!" is printed - } - // ANCHOR_END: hello_world +fn main() { + let future = hello_world(); // Nothing is printed + block_on(future); // `future` is run and "hello, world!" 
is printed +} +// ANCHOR_END: hello_world - #[test] - fn run_main() { - main() - } +#[test] +fn run_main() { main() } } struct Song; -async fn learn_song() -> Song { - Song -} +async fn learn_song() -> Song { Song } async fn sing_song(_: Song) {} async fn dance() {} mod second { - use super::*; - // ANCHOR: block_on_each - fn main() { - let song = block_on(learn_song()); - block_on(sing_song(song)); - block_on(dance()); - } - // ANCHOR_END: block_on_each +use super::*; +// ANCHOR: block_on_each +fn main() { + let song = block_on(learn_song()); + block_on(sing_song(song)); + block_on(dance()); +} +// ANCHOR_END: block_on_each - #[test] - fn run_main() { - main() - } +#[test] +fn run_main() { main() } } mod third { - use super::*; - // ANCHOR: block_on_main - async fn learn_and_sing() { - // Wait until the song has been learned before singing it. - // We use `.await` here rather than `block_on` to prevent blocking the - // thread, which makes it possible to `dance` at the same time. - let song = learn_song().await; - sing_song(song).await; - } +use super::*; +// ANCHOR: block_on_main +async fn learn_and_sing() { + // Wait until the song has been learned before singing it. + // We use `.await` here rather than `block_on` to prevent blocking the + // thread, which makes it possible to `dance` at the same time. + let song = learn_song().await; + sing_song(song).await; +} - async fn async_main() { - let f1 = learn_and_sing(); - let f2 = dance(); +async fn async_main() { + let f1 = learn_and_sing(); + let f2 = dance(); - // `join!` is like `.await` but can wait for multiple futures concurrently. - // If we're temporarily blocked in the `learn_and_sing` future, the `dance` - // future will take over the current thread. If `dance` becomes blocked, - // `learn_and_sing` can take back over. If both futures are blocked, then - // `async_main` is blocked and will yield to the executor. - futures::join!(f1, f2); - } + // `join!` is like `.await` but can wait for multiple futures concurrently. + // If we're temporarily blocked in the `learn_and_sing` future, the `dance` + // future will take over the current thread. If `dance` becomes blocked, + // `learn_and_sing` can take back over. If both futures are blocked, then + // `async_main` is blocked and will yield to the executor. 
+ futures::join!(f1, f2); +} - fn main() { - block_on(async_main()); - } - // ANCHOR_END: block_on_main +fn main() { + block_on(async_main()); +} +// ANCHOR_END: block_on_main - #[test] - fn run_main() { - main() - } +#[test] +fn run_main() { main() } } diff --git a/examples/02_02_future_trait/src/lib.rs b/examples/02_02_future_trait/src/lib.rs index 790568c0..79e14d6c 100644 --- a/examples/02_02_future_trait/src/lib.rs +++ b/examples/02_02_future_trait/src/lib.rs @@ -135,29 +135,29 @@ where // ANCHOR_END: and_then mod real_future { - use std::{ - future::Future as RealFuture, - pin::Pin, - task::{Context, Poll}, - }; +use std::{ + future::Future as RealFuture, + pin::Pin, + task::{Context, Poll}, +}; - // ANCHOR: real_future - trait Future { - type Output; - fn poll( - // Note the change from `&mut self` to `Pin<&mut Self>`: - self: Pin<&mut Self>, - // and the change from `wake: fn()` to `cx: &mut Context<'_>`: - cx: &mut Context<'_>, - ) -> Poll; - } - // ANCHOR_END: real_future +// ANCHOR: real_future +trait Future { + type Output; + fn poll( + // Note the change from `&mut self` to `Pin<&mut Self>`: + self: Pin<&mut Self>, + // and the change from `wake: fn()` to `cx: &mut Context<'_>`: + cx: &mut Context<'_>, + ) -> Poll; +} +// ANCHOR_END: real_future - // ensure that `Future` matches `RealFuture`: - impl Future for dyn RealFuture { - type Output = O; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - RealFuture::poll(self, cx) - } +// ensure that `Future` matches `RealFuture`: +impl Future for dyn RealFuture { + type Output = O; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + RealFuture::poll(self, cx) } } +} diff --git a/examples/02_03_timer/src/lib.rs b/examples/02_03_timer/src/lib.rs index ba98a369..fc828949 100644 --- a/examples/02_03_timer/src/lib.rs +++ b/examples/02_03_timer/src/lib.rs @@ -1,11 +1,13 @@ // ANCHOR: imports -use std::{ - future::Future, - pin::Pin, - sync::{Arc, Mutex}, - task::{Context, Poll, Waker}, - thread, - time::Duration, +use { + std::{ + future::Future, + pin::Pin, + sync::{Arc, Mutex}, + task::{Context, Poll, Waker}, + thread, + time::Duration, + }, }; // ANCHOR_END: imports @@ -85,5 +87,7 @@ impl TimerFuture { #[test] fn block_on_timer() { - futures::executor::block_on(async { TimerFuture::new(Duration::from_secs(1)).await }) + futures::executor::block_on(async { + TimerFuture::new(Duration::from_secs(1)).await + }) } diff --git a/examples/03_01_async_await/src/lib.rs b/examples/03_01_async_await/src/lib.rs index d145f4f6..9510a7d2 100644 --- a/examples/03_01_async_await/src/lib.rs +++ b/examples/03_01_async_await/src/lib.rs @@ -2,96 +2,90 @@ #![cfg(test)] mod async_fn_and_block_examples { - use std::future::Future; - // ANCHOR: async_fn_and_block_examples +use std::future::Future; +// ANCHOR: async_fn_and_block_examples - // `foo()` returns a type that implements `Future`. - // `foo().await` will result in a value of type `u8`. - async fn foo() -> u8 { - 5 - } +// `foo()` returns a type that implements `Future`. +// `foo().await` will result in a value of type `u8`. +async fn foo() -> u8 { 5 } - fn bar() -> impl Future { - // This `async` block results in a type that implements - // `Future`. - async { - let x: u8 = foo().await; - x + 5 - } +fn bar() -> impl Future { + // This `async` block results in a type that implements + // `Future`. 
+ async { + let x: u8 = foo().await; + x + 5 } - // ANCHOR_END: async_fn_and_block_examples +} +// ANCHOR_END: async_fn_and_block_examples } mod async_lifetimes_examples { - use std::future::Future; - // ANCHOR: lifetimes_expanded - // This function: - async fn foo(x: &u8) -> u8 { - *x - } +use std::future::Future; +// ANCHOR: lifetimes_expanded +// This function: +async fn foo(x: &u8) -> u8 { *x } - // Is equivalent to this function: - fn foo_expanded<'a>(x: &'a u8) -> impl Future + 'a { - async move { *x } - } - // ANCHOR_END: lifetimes_expanded +// Is equivalent to this function: +fn foo_expanded<'a>(x: &'a u8) -> impl Future + 'a { + async move { *x } +} +// ANCHOR_END: lifetimes_expanded - async fn borrow_x(x: &u8) -> u8 { - *x - } +async fn borrow_x(x: &u8) -> u8 { *x } - #[cfg(feature = "never_compiled")] - // ANCHOR: static_future_with_borrow - fn bad() -> impl Future { - let x = 5; - borrow_x(&x) // ERROR: `x` does not live long enough - } +#[cfg(feature = "never_compiled")] +// ANCHOR: static_future_with_borrow +fn bad() -> impl Future { + let x = 5; + borrow_x(&x) // ERROR: `x` does not live long enough +} - fn good() -> impl Future { - async { - let x = 5; - borrow_x(&x).await - } +fn good() -> impl Future { + async { + let x = 5; + borrow_x(&x).await } - // ANCHOR_END: static_future_with_borrow +} +// ANCHOR_END: static_future_with_borrow } mod async_move_examples { - use std::future::Future; - // ANCHOR: async_move_examples - /// `async` block: - /// - /// Multiple different `async` blocks can access the same local variable - /// so long as they're executed within the variable's scope - async fn blocks() { - let my_string = "foo".to_string(); +use std::future::Future; +// ANCHOR: async_move_examples +/// `async` block: +/// +/// Multiple different `async` blocks can access the same local variable +/// so long as they're executed within the variable's scope +async fn blocks() { + let my_string = "foo".to_string(); - let future_one = async { - // ... - println!("{}", my_string); - }; + let future_one = async { + // ... + println!("{}", my_string); + }; - let future_two = async { - // ... - println!("{}", my_string); - }; + let future_two = async { + // ... + println!("{}", my_string); + }; - // Run both futures to completion, printing "foo" twice: - let ((), ()) = futures::join!(future_one, future_two); - } + // Run both futures to completion, printing "foo" twice: + let ((), ()) = futures::join!(future_one, future_two); +} - /// `async move` block: - /// - /// Only one `async move` block can access the same captured variable, since - /// captures are moved into the `Future` generated by the `async move` block. - /// However, this allows the `Future` to outlive the original scope of the - /// variable: - fn move_block() -> impl Future { - let my_string = "foo".to_string(); - async move { - // ... - println!("{}", my_string); - } +/// `async move` block: +/// +/// Only one `async move` block can access the same captured variable, since +/// captures are moved into the `Future` generated by the `async move` block. +/// However, this allows the `Future` to outlive the original scope of the +/// variable: +fn move_block() -> impl Future { + let my_string = "foo".to_string(); + async move { + // ... 
+ println!("{}", my_string); } - // ANCHOR_END: async_move_examples +} +// ANCHOR_END: async_move_examples } diff --git a/examples/05_01_streams/src/lib.rs b/examples/05_01_streams/src/lib.rs index 09261889..d68fc7e3 100644 --- a/examples/05_01_streams/src/lib.rs +++ b/examples/05_01_streams/src/lib.rs @@ -1,57 +1,63 @@ #![cfg(test)] mod stream_trait { - use { - futures::stream::Stream as RealStream, - std::{ - pin::Pin, - task::{Context, Poll}, - }, - }; - - // ANCHOR: stream_trait - trait Stream { - /// The type of the value yielded by the stream. - type Item; - - /// Attempt to resolve the next item in the stream. - /// Returns `Poll::Pending` if not ready, `Poll::Ready(Some(x))` if a value - /// is ready, and `Poll::Ready(None)` if the stream has completed. - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - } - // ANCHOR_END: stream_trait - - // assert that `Stream` matches `RealStream`: - impl Stream for dyn RealStream { - type Item = I; - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - RealStream::poll_next(self, cx) - } +use { + futures::stream::{Stream as RealStream}, + std::{ + pin::Pin, + task::{Context, Poll}, + }, +}; + +// ANCHOR: stream_trait +trait Stream { + /// The type of the value yielded by the stream. + type Item; + + /// Attempt to resolve the next item in the stream. + /// Returns `Poll::Pending` if not ready, `Poll::Ready(Some(x))` if a value + /// is ready, and `Poll::Ready(None)` if the stream has completed. + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) + -> Poll>; +} +// ANCHOR_END: stream_trait + +// assert that `Stream` matches `RealStream`: +impl Stream for dyn RealStream { + type Item = I; + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) + -> Poll> + { + RealStream::poll_next(self, cx) } } +} mod channels { - use futures::{channel::mpsc, prelude::*}; - - // ANCHOR: channels - async fn send_recv() { - const BUFFER_SIZE: usize = 10; - let (mut tx, mut rx) = mpsc::channel::(BUFFER_SIZE); - - tx.send(1).await.unwrap(); - tx.send(2).await.unwrap(); - drop(tx); - - // `StreamExt::next` is similar to `Iterator::next`, but returns a - // type that implements `Future>`. - assert_eq!(Some(1), rx.next().await); - assert_eq!(Some(2), rx.next().await); - assert_eq!(None, rx.next().await); - } - // ANCHOR_END: channels +use { + futures::{ + channel::mpsc, + prelude::*, + }, +}; + +// ANCHOR: channels +async fn send_recv() { + const BUFFER_SIZE: usize = 10; + let (mut tx, mut rx) = mpsc::channel::(BUFFER_SIZE); + + tx.send(1).await.unwrap(); + tx.send(2).await.unwrap(); + drop(tx); + + // `StreamExt::next` is similar to `Iterator::next`, but returns a + // type that implements `Future>`. 
+ assert_eq!(Some(1), rx.next().await); + assert_eq!(Some(2), rx.next().await); + assert_eq!(None, rx.next().await); +} +// ANCHOR_END: channels - #[test] - fn run_send_recv() { - futures::executor::block_on(send_recv()) - } +#[test] +fn run_send_recv() { futures::executor::block_on(send_recv()) } } diff --git a/examples/05_02_iteration_and_concurrency/src/lib.rs b/examples/05_02_iteration_and_concurrency/src/lib.rs index 18f2f93a..6eb36a14 100644 --- a/examples/05_02_iteration_and_concurrency/src/lib.rs +++ b/examples/05_02_iteration_and_concurrency/src/lib.rs @@ -5,7 +5,10 @@ use { executor::block_on, stream::{self, Stream}, }, - std::{io, pin::Pin}, + std::{ + io, + pin::Pin, + }, }; // ANCHOR: nexts @@ -52,21 +55,15 @@ async fn jump_around( use futures::stream::TryStreamExt; // for `try_for_each_concurrent` const MAX_CONCURRENT_JUMPERS: usize = 100; - stream - .try_for_each_concurrent(MAX_CONCURRENT_JUMPERS, |num| async move { - jump_n_times(num).await?; - report_n_jumps(num).await?; - Ok(()) - }) - .await?; + stream.try_for_each_concurrent(MAX_CONCURRENT_JUMPERS, |num| async move { + jump_n_times(num).await?; + report_n_jumps(num).await?; + Ok(()) + }).await?; Ok(()) } // ANCHOR_END: try_for_each_concurrent -async fn jump_n_times(_: u8) -> Result<(), io::Error> { - Ok(()) -} -async fn report_n_jumps(_: u8) -> Result<(), io::Error> { - Ok(()) -} +async fn jump_n_times(_: u8) -> Result<(), io::Error> { Ok(()) } +async fn report_n_jumps(_: u8) -> Result<(), io::Error> { Ok(()) } diff --git a/examples/06_02_join/src/lib.rs b/examples/06_02_join/src/lib.rs index 5461db23..b2cd8719 100644 --- a/examples/06_02_join/src/lib.rs +++ b/examples/06_02_join/src/lib.rs @@ -2,89 +2,76 @@ struct Book; struct Music; -async fn get_book() -> Book { - Book -} -async fn get_music() -> Music { - Music -} +async fn get_book() -> Book { Book } +async fn get_music() -> Music { Music } mod naiive { - use super::*; - // ANCHOR: naiive - async fn get_book_and_music() -> (Book, Music) { - let book = get_book().await; - let music = get_music().await; - (book, music) - } - // ANCHOR_END: naiive +use super::*; +// ANCHOR: naiive +async fn get_book_and_music() -> (Book, Music) { + let book = get_book().await; + let music = get_music().await; + (book, music) +} +// ANCHOR_END: naiive } mod other_langs { - use super::*; - // ANCHOR: other_langs - // WRONG -- don't do this - async fn get_book_and_music() -> (Book, Music) { - let book_future = get_book(); - let music_future = get_music(); - (book_future.await, music_future.await) - } - // ANCHOR_END: other_langs +use super::*; +// ANCHOR: other_langs +// WRONG -- don't do this +async fn get_book_and_music() -> (Book, Music) { + let book_future = get_book(); + let music_future = get_music(); + (book_future.await, music_future.await) +} +// ANCHOR_END: other_langs } mod join { - use super::*; - // ANCHOR: join - use futures::join; +use super::*; +// ANCHOR: join +use futures::join; - async fn get_book_and_music() -> (Book, Music) { - let book_fut = get_book(); - let music_fut = get_music(); - join!(book_fut, music_fut) - } - // ANCHOR_END: join +async fn get_book_and_music() -> (Book, Music) { + let book_fut = get_book(); + let music_fut = get_music(); + join!(book_fut, music_fut) +} +// ANCHOR_END: join } mod try_join { - use super::{Book, Music}; - // ANCHOR: try_join - use futures::try_join; +use super::{Book, Music}; +// ANCHOR: try_join +use futures::try_join; - async fn get_book() -> Result { - /* ... */ - Ok(Book) - } - async fn get_music() -> Result { - /* ... 
*/ - Ok(Music) - } +async fn get_book() -> Result { /* ... */ Ok(Book) } +async fn get_music() -> Result { /* ... */ Ok(Music) } - async fn get_book_and_music() -> Result<(Book, Music), String> { - let book_fut = get_book(); - let music_fut = get_music(); - try_join!(book_fut, music_fut) - } - // ANCHOR_END: try_join +async fn get_book_and_music() -> Result<(Book, Music), String> { + let book_fut = get_book(); + let music_fut = get_music(); + try_join!(book_fut, music_fut) +} +// ANCHOR_END: try_join } mod mismatched_err { - use super::{Book, Music}; - // ANCHOR: try_join_map_err - use futures::{future::TryFutureExt, try_join}; +use super::{Book, Music}; +// ANCHOR: try_join_map_err +use futures::{ + future::TryFutureExt, + try_join, +}; - async fn get_book() -> Result { - /* ... */ - Ok(Book) - } - async fn get_music() -> Result { - /* ... */ - Ok(Music) - } +async fn get_book() -> Result { /* ... */ Ok(Book) } +async fn get_music() -> Result { /* ... */ Ok(Music) } - async fn get_book_and_music() -> Result<(Book, Music), String> { - let book_fut = get_book().map_err(|()| "Unable to get book".to_string()); - let music_fut = get_music(); - try_join!(book_fut, music_fut) - } - // ANCHOR_END: try_join_map_err +async fn get_book_and_music() -> Result<(Book, Music), String> { + let book_fut = get_book().map_err(|()| "Unable to get book".to_string()); + let music_fut = get_music(); + try_join!(book_fut, music_fut) +} +// ANCHOR_END: try_join_map_err } diff --git a/examples/06_03_select/src/lib.rs b/examples/06_03_select/src/lib.rs index 5a4b7dba..7d79d439 100644 --- a/examples/06_03_select/src/lib.rs +++ b/examples/06_03_select/src/lib.rs @@ -1,193 +1,183 @@ #![cfg(test)] -#![recursion_limit = "128"] +#![recursion_limit="128"] mod example { - // ANCHOR: example - use futures::{ - future::FutureExt, // for `.fuse()` - pin_mut, - select, - }; - - async fn task_one() { /* ... */ - } - async fn task_two() { /* ... */ - } +// ANCHOR: example +use futures::{ + future::FutureExt, // for `.fuse()` + pin_mut, + select, +}; - async fn race_tasks() { - let t1 = task_one().fuse(); - let t2 = task_two().fuse(); +async fn task_one() { /* ... */ } +async fn task_two() { /* ... */ } - pin_mut!(t1, t2); +async fn race_tasks() { + let t1 = task_one().fuse(); + let t2 = task_two().fuse(); - select! { - () = t1 => println!("task one completed first"), - () = t2 => println!("task two completed first"), - } + pin_mut!(t1, t2); + + select! { + () = t1 => println!("task one completed first"), + () = t2 => println!("task two completed first"), } - // ANCHOR_END: example +} +// ANCHOR_END: example } mod default_and_complete { - // ANCHOR: default_and_complete - use futures::{future, select}; - - async fn count() { - let mut a_fut = future::ready(4); - let mut b_fut = future::ready(6); - let mut total = 0; - - loop { - select! { - a = a_fut => total += a, - b = b_fut => total += b, - complete => break, - default => unreachable!(), // never runs (futures are ready, then complete) - }; - } - assert_eq!(total, 10); - } - // ANCHOR_END: default_and_complete +// ANCHOR: default_and_complete +use futures::{future, select}; + +async fn count() { + let mut a_fut = future::ready(4); + let mut b_fut = future::ready(6); + let mut total = 0; - #[test] - fn run_count() { - futures::executor::block_on(count()); + loop { + select! 
{ + a = a_fut => total += a, + b = b_fut => total += b, + complete => break, + default => unreachable!(), // never runs (futures are ready, then complete) + }; } + assert_eq!(total, 10); +} +// ANCHOR_END: default_and_complete + +#[test] +fn run_count() { + futures::executor::block_on(count()); +} } mod fused_stream { - // ANCHOR: fused_stream - use futures::{ - select, - stream::{FusedStream, Stream, StreamExt}, - }; - - async fn add_two_streams( - mut s1: impl Stream + FusedStream + Unpin, - mut s2: impl Stream + FusedStream + Unpin, - ) -> u8 { - let mut total = 0; - - loop { - let item = select! { - x = s1.next() => x, - x = s2.next() => x, - complete => break, - }; - if let Some(next_num) = item { - total += next_num; - } +// ANCHOR: fused_stream +use futures::{ + stream::{Stream, StreamExt, FusedStream}, + select, +}; + +async fn add_two_streams( + mut s1: impl Stream + FusedStream + Unpin, + mut s2: impl Stream + FusedStream + Unpin, +) -> u8 { + let mut total = 0; + + loop { + let item = select! { + x = s1.next() => x, + x = s2.next() => x, + complete => break, + }; + if let Some(next_num) = item { + total += next_num; } - - total } - // ANCHOR_END: fused_stream + + total +} +// ANCHOR_END: fused_stream } mod fuse_terminated { - // ANCHOR: fuse_terminated - use futures::{ - future::{Fuse, FusedFuture, FutureExt}, - pin_mut, select, - stream::{FusedStream, Stream, StreamExt}, - }; - - async fn get_new_num() -> u8 { - /* ... */ - 5 - } - - async fn run_on_new_num(_: u8) { /* ... */ - } - - async fn run_loop( - mut interval_timer: impl Stream + FusedStream + Unpin, - starting_num: u8, - ) { - let run_on_new_num_fut = run_on_new_num(starting_num).fuse(); - let get_new_num_fut = Fuse::terminated(); - pin_mut!(run_on_new_num_fut, get_new_num_fut); - loop { - select! { - () = interval_timer.select_next_some() => { - // The timer has elapsed. Start a new `get_new_num_fut` - // if one was not already running. - if get_new_num_fut.is_terminated() { - get_new_num_fut.set(get_new_num().fuse()); - } - }, - new_num = get_new_num_fut => { - // A new number has arrived-- start a new `run_on_new_num_fut`, - // dropping the old one. - run_on_new_num_fut.set(run_on_new_num(new_num).fuse()); - }, - // Run the `run_on_new_num_fut` - () = run_on_new_num_fut => {}, - // panic if everything completed, since the `interval_timer` should - // keep yielding values indefinitely. - complete => panic!("`interval_timer` completed unexpectedly"), - } +// ANCHOR: fuse_terminated +use futures::{ + future::{Fuse, FusedFuture, FutureExt}, + stream::{FusedStream, Stream, StreamExt}, + pin_mut, + select, +}; + +async fn get_new_num() -> u8 { /* ... */ 5 } + +async fn run_on_new_num(_: u8) { /* ... */ } + +async fn run_loop( + mut interval_timer: impl Stream + FusedStream + Unpin, + starting_num: u8, +) { + let run_on_new_num_fut = run_on_new_num(starting_num).fuse(); + let get_new_num_fut = Fuse::terminated(); + pin_mut!(run_on_new_num_fut, get_new_num_fut); + loop { + select! { + () = interval_timer.select_next_some() => { + // The timer has elapsed. Start a new `get_new_num_fut` + // if one was not already running. + if get_new_num_fut.is_terminated() { + get_new_num_fut.set(get_new_num().fuse()); + } + }, + new_num = get_new_num_fut => { + // A new number has arrived-- start a new `run_on_new_num_fut`, + // dropping the old one. 
+ run_on_new_num_fut.set(run_on_new_num(new_num).fuse()); + }, + // Run the `run_on_new_num_fut` + () = run_on_new_num_fut => {}, + // panic if everything completed, since the `interval_timer` should + // keep yielding values indefinitely. + complete => panic!("`interval_timer` completed unexpectedly"), } } - // ANCHOR_END: fuse_terminated +} +// ANCHOR_END: fuse_terminated } mod futures_unordered { - // ANCHOR: futures_unordered - use futures::{ - future::{Fuse, FusedFuture, FutureExt}, - pin_mut, select, - stream::{FusedStream, FuturesUnordered, Stream, StreamExt}, - }; - - async fn get_new_num() -> u8 { - /* ... */ - 5 - } - - async fn run_on_new_num(_: u8) -> u8 { - /* ... */ - 5 - } - - // Runs `run_on_new_num` with the latest number - // retrieved from `get_new_num`. - // - // `get_new_num` is re-run every time a timer elapses, - // immediately cancelling the currently running - // `run_on_new_num` and replacing it with the newly - // returned value. - async fn run_loop( - mut interval_timer: impl Stream + FusedStream + Unpin, - starting_num: u8, - ) { - let mut run_on_new_num_futs = FuturesUnordered::new(); - run_on_new_num_futs.push(run_on_new_num(starting_num)); - let get_new_num_fut = Fuse::terminated(); - pin_mut!(get_new_num_fut); - loop { - select! { - () = interval_timer.select_next_some() => { - // The timer has elapsed. Start a new `get_new_num_fut` - // if one was not already running. - if get_new_num_fut.is_terminated() { - get_new_num_fut.set(get_new_num().fuse()); - } - }, - new_num = get_new_num_fut => { - // A new number has arrived-- start a new `run_on_new_num_fut`. - run_on_new_num_futs.push(run_on_new_num(new_num)); - }, - // Run the `run_on_new_num_futs` and check if any have completed - res = run_on_new_num_futs.select_next_some() => { - println!("run_on_new_num_fut returned {:?}", res); - }, - // panic if everything completed, since the `interval_timer` should - // keep yielding values indefinitely. - complete => panic!("`interval_timer` completed unexpectedly"), - } +// ANCHOR: futures_unordered +use futures::{ + future::{Fuse, FusedFuture, FutureExt}, + stream::{FusedStream, FuturesUnordered, Stream, StreamExt}, + pin_mut, + select, +}; + +async fn get_new_num() -> u8 { /* ... */ 5 } + +async fn run_on_new_num(_: u8) -> u8 { /* ... */ 5 } + +// Runs `run_on_new_num` with the latest number +// retrieved from `get_new_num`. +// +// `get_new_num` is re-run every time a timer elapses, +// immediately cancelling the currently running +// `run_on_new_num` and replacing it with the newly +// returned value. +async fn run_loop( + mut interval_timer: impl Stream + FusedStream + Unpin, + starting_num: u8, +) { + let mut run_on_new_num_futs = FuturesUnordered::new(); + run_on_new_num_futs.push(run_on_new_num(starting_num)); + let get_new_num_fut = Fuse::terminated(); + pin_mut!(get_new_num_fut); + loop { + select! { + () = interval_timer.select_next_some() => { + // The timer has elapsed. Start a new `get_new_num_fut` + // if one was not already running. + if get_new_num_fut.is_terminated() { + get_new_num_fut.set(get_new_num().fuse()); + } + }, + new_num = get_new_num_fut => { + // A new number has arrived-- start a new `run_on_new_num_fut`. 
+ run_on_new_num_futs.push(run_on_new_num(new_num)); + }, + // Run the `run_on_new_num_futs` and check if any have completed + res = run_on_new_num_futs.select_next_some() => { + println!("run_on_new_num_fut returned {:?}", res); + }, + // panic if everything completed, since the `interval_timer` should + // keep yielding values indefinitely. + complete => panic!("`interval_timer` completed unexpectedly"), } } +} - // ANCHOR_END: futures_unordered +// ANCHOR_END: futures_unordered } diff --git a/examples/07_05_recursion/src/lib.rs b/examples/07_05_recursion/src/lib.rs index 10e8c8b4..7811cfb3 100644 --- a/examples/07_05_recursion/src/lib.rs +++ b/examples/07_05_recursion/src/lib.rs @@ -8,7 +8,6 @@ fn recursive() -> BoxFuture<'static, ()> { async move { recursive().await; recursive().await; - } - .boxed() + }.boxed() } // ANCHOR_END: example From 3ab9e17a60bdf0451394e19a991cdd74a7261ebd Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Fri, 4 Sep 2020 18:12:15 -0400 Subject: [PATCH 06/12] Revert formatting changes to other examples --- examples/01_02_why_async/src/lib.rs | 5 +- examples/01_04_async_await_primer/src/lib.rs | 110 +++--- examples/02_02_future_trait/src/lib.rs | 44 +-- examples/02_03_timer/src/lib.rs | 20 +- examples/02_04_executor/src/lib.rs | 15 +- examples/03_01_async_await/src/lib.rs | 140 ++++---- examples/05_01_streams/src/lib.rs | 102 +++--- .../src/lib.rs | 25 +- examples/06_02_join/src/lib.rs | 119 +++---- examples/06_03_select/src/lib.rs | 318 +++++++++--------- examples/07_05_recursion/src/lib.rs | 3 +- 11 files changed, 434 insertions(+), 467 deletions(-) diff --git a/examples/01_02_why_async/src/lib.rs b/examples/01_02_why_async/src/lib.rs index a43f7a8f..64758e4f 100644 --- a/examples/01_02_why_async/src/lib.rs +++ b/examples/01_02_why_async/src/lib.rs @@ -1,7 +1,10 @@ #![cfg(test)] use { - futures::{executor::block_on, join}, + futures::{ + executor::block_on, + join, + }, std::thread, }; diff --git a/examples/01_04_async_await_primer/src/lib.rs b/examples/01_04_async_await_primer/src/lib.rs index 4425e22e..1a2c14eb 100644 --- a/examples/01_04_async_await_primer/src/lib.rs +++ b/examples/01_04_async_await_primer/src/lib.rs @@ -3,81 +3,73 @@ use futures::executor::block_on; mod first { - // ANCHOR: hello_world - // `block_on` blocks the current thread until the provided future has run to - // completion. Other executors provide more complex behavior, like scheduling - // multiple futures onto the same thread. - use futures::executor::block_on; +// ANCHOR: hello_world +// `block_on` blocks the current thread until the provided future has run to +// completion. Other executors provide more complex behavior, like scheduling +// multiple futures onto the same thread. +use futures::executor::block_on; - async fn hello_world() { - println!("hello, world!"); - } +async fn hello_world() { + println!("hello, world!"); +} - fn main() { - let future = hello_world(); // Nothing is printed - block_on(future); // `future` is run and "hello, world!" is printed - } - // ANCHOR_END: hello_world +fn main() { + let future = hello_world(); // Nothing is printed + block_on(future); // `future` is run and "hello, world!" 
is printed +} +// ANCHOR_END: hello_world - #[test] - fn run_main() { - main() - } +#[test] +fn run_main() { main() } } struct Song; -async fn learn_song() -> Song { - Song -} +async fn learn_song() -> Song { Song } async fn sing_song(_: Song) {} async fn dance() {} mod second { - use super::*; - // ANCHOR: block_on_each - fn main() { - let song = block_on(learn_song()); - block_on(sing_song(song)); - block_on(dance()); - } - // ANCHOR_END: block_on_each +use super::*; +// ANCHOR: block_on_each +fn main() { + let song = block_on(learn_song()); + block_on(sing_song(song)); + block_on(dance()); +} +// ANCHOR_END: block_on_each - #[test] - fn run_main() { - main() - } +#[test] +fn run_main() { main() } } mod third { - use super::*; - // ANCHOR: block_on_main - async fn learn_and_sing() { - // Wait until the song has been learned before singing it. - // We use `.await` here rather than `block_on` to prevent blocking the - // thread, which makes it possible to `dance` at the same time. - let song = learn_song().await; - sing_song(song).await; - } +use super::*; +// ANCHOR: block_on_main +async fn learn_and_sing() { + // Wait until the song has been learned before singing it. + // We use `.await` here rather than `block_on` to prevent blocking the + // thread, which makes it possible to `dance` at the same time. + let song = learn_song().await; + sing_song(song).await; +} - async fn async_main() { - let f1 = learn_and_sing(); - let f2 = dance(); +async fn async_main() { + let f1 = learn_and_sing(); + let f2 = dance(); - // `join!` is like `.await` but can wait for multiple futures concurrently. - // If we're temporarily blocked in the `learn_and_sing` future, the `dance` - // future will take over the current thread. If `dance` becomes blocked, - // `learn_and_sing` can take back over. If both futures are blocked, then - // `async_main` is blocked and will yield to the executor. - futures::join!(f1, f2); - } + // `join!` is like `.await` but can wait for multiple futures concurrently. + // If we're temporarily blocked in the `learn_and_sing` future, the `dance` + // future will take over the current thread. If `dance` becomes blocked, + // `learn_and_sing` can take back over. If both futures are blocked, then + // `async_main` is blocked and will yield to the executor. 
+ futures::join!(f1, f2); +} - fn main() { - block_on(async_main()); - } - // ANCHOR_END: block_on_main +fn main() { + block_on(async_main()); +} +// ANCHOR_END: block_on_main - #[test] - fn run_main() { - main() - } +#[test] +fn run_main() { main() } } diff --git a/examples/02_02_future_trait/src/lib.rs b/examples/02_02_future_trait/src/lib.rs index 790568c0..79e14d6c 100644 --- a/examples/02_02_future_trait/src/lib.rs +++ b/examples/02_02_future_trait/src/lib.rs @@ -135,29 +135,29 @@ where // ANCHOR_END: and_then mod real_future { - use std::{ - future::Future as RealFuture, - pin::Pin, - task::{Context, Poll}, - }; +use std::{ + future::Future as RealFuture, + pin::Pin, + task::{Context, Poll}, +}; - // ANCHOR: real_future - trait Future { - type Output; - fn poll( - // Note the change from `&mut self` to `Pin<&mut Self>`: - self: Pin<&mut Self>, - // and the change from `wake: fn()` to `cx: &mut Context<'_>`: - cx: &mut Context<'_>, - ) -> Poll; - } - // ANCHOR_END: real_future +// ANCHOR: real_future +trait Future { + type Output; + fn poll( + // Note the change from `&mut self` to `Pin<&mut Self>`: + self: Pin<&mut Self>, + // and the change from `wake: fn()` to `cx: &mut Context<'_>`: + cx: &mut Context<'_>, + ) -> Poll; +} +// ANCHOR_END: real_future - // ensure that `Future` matches `RealFuture`: - impl Future for dyn RealFuture { - type Output = O; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - RealFuture::poll(self, cx) - } +// ensure that `Future` matches `RealFuture`: +impl Future for dyn RealFuture { + type Output = O; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + RealFuture::poll(self, cx) } } +} diff --git a/examples/02_03_timer/src/lib.rs b/examples/02_03_timer/src/lib.rs index ba98a369..fc828949 100644 --- a/examples/02_03_timer/src/lib.rs +++ b/examples/02_03_timer/src/lib.rs @@ -1,11 +1,13 @@ // ANCHOR: imports -use std::{ - future::Future, - pin::Pin, - sync::{Arc, Mutex}, - task::{Context, Poll, Waker}, - thread, - time::Duration, +use { + std::{ + future::Future, + pin::Pin, + sync::{Arc, Mutex}, + task::{Context, Poll, Waker}, + thread, + time::Duration, + }, }; // ANCHOR_END: imports @@ -85,5 +87,7 @@ impl TimerFuture { #[test] fn block_on_timer() { - futures::executor::block_on(async { TimerFuture::new(Duration::from_secs(1)).await }) + futures::executor::block_on(async { + TimerFuture::new(Duration::from_secs(1)).await + }) } diff --git a/examples/02_04_executor/src/lib.rs b/examples/02_04_executor/src/lib.rs index e5120cd9..58ed7b7a 100644 --- a/examples/02_04_executor/src/lib.rs +++ b/examples/02_04_executor/src/lib.rs @@ -3,13 +3,13 @@ // ANCHOR: imports use { futures::{ - future::{BoxFuture, FutureExt}, - task::{waker_ref, ArcWake}, + future::{FutureExt, BoxFuture}, + task::{ArcWake, waker_ref}, }, std::{ future::Future, - sync::mpsc::{sync_channel, Receiver, SyncSender}, sync::{Arc, Mutex}, + sync::mpsc::{sync_channel, SyncSender, Receiver}, task::{Context, Poll}, time::Duration, }, @@ -74,10 +74,7 @@ impl ArcWake for Task { // Implement `wake` by sending this task back onto the task channel // so that it will be polled again by the executor. 
let cloned = arc_self.clone(); - arc_self - .task_sender - .send(cloned) - .expect("too many tasks queued"); + arc_self.task_sender.send(cloned).expect("too many tasks queued"); } } // ANCHOR_END: arcwake_for_task @@ -131,6 +128,4 @@ fn main() { // ANCHOR_END: main #[test] -fn run_main() { - main() -} +fn run_main() { main() } diff --git a/examples/03_01_async_await/src/lib.rs b/examples/03_01_async_await/src/lib.rs index d145f4f6..9510a7d2 100644 --- a/examples/03_01_async_await/src/lib.rs +++ b/examples/03_01_async_await/src/lib.rs @@ -2,96 +2,90 @@ #![cfg(test)] mod async_fn_and_block_examples { - use std::future::Future; - // ANCHOR: async_fn_and_block_examples +use std::future::Future; +// ANCHOR: async_fn_and_block_examples - // `foo()` returns a type that implements `Future`. - // `foo().await` will result in a value of type `u8`. - async fn foo() -> u8 { - 5 - } +// `foo()` returns a type that implements `Future`. +// `foo().await` will result in a value of type `u8`. +async fn foo() -> u8 { 5 } - fn bar() -> impl Future { - // This `async` block results in a type that implements - // `Future`. - async { - let x: u8 = foo().await; - x + 5 - } +fn bar() -> impl Future { + // This `async` block results in a type that implements + // `Future`. + async { + let x: u8 = foo().await; + x + 5 } - // ANCHOR_END: async_fn_and_block_examples +} +// ANCHOR_END: async_fn_and_block_examples } mod async_lifetimes_examples { - use std::future::Future; - // ANCHOR: lifetimes_expanded - // This function: - async fn foo(x: &u8) -> u8 { - *x - } +use std::future::Future; +// ANCHOR: lifetimes_expanded +// This function: +async fn foo(x: &u8) -> u8 { *x } - // Is equivalent to this function: - fn foo_expanded<'a>(x: &'a u8) -> impl Future + 'a { - async move { *x } - } - // ANCHOR_END: lifetimes_expanded +// Is equivalent to this function: +fn foo_expanded<'a>(x: &'a u8) -> impl Future + 'a { + async move { *x } +} +// ANCHOR_END: lifetimes_expanded - async fn borrow_x(x: &u8) -> u8 { - *x - } +async fn borrow_x(x: &u8) -> u8 { *x } - #[cfg(feature = "never_compiled")] - // ANCHOR: static_future_with_borrow - fn bad() -> impl Future { - let x = 5; - borrow_x(&x) // ERROR: `x` does not live long enough - } +#[cfg(feature = "never_compiled")] +// ANCHOR: static_future_with_borrow +fn bad() -> impl Future { + let x = 5; + borrow_x(&x) // ERROR: `x` does not live long enough +} - fn good() -> impl Future { - async { - let x = 5; - borrow_x(&x).await - } +fn good() -> impl Future { + async { + let x = 5; + borrow_x(&x).await } - // ANCHOR_END: static_future_with_borrow +} +// ANCHOR_END: static_future_with_borrow } mod async_move_examples { - use std::future::Future; - // ANCHOR: async_move_examples - /// `async` block: - /// - /// Multiple different `async` blocks can access the same local variable - /// so long as they're executed within the variable's scope - async fn blocks() { - let my_string = "foo".to_string(); +use std::future::Future; +// ANCHOR: async_move_examples +/// `async` block: +/// +/// Multiple different `async` blocks can access the same local variable +/// so long as they're executed within the variable's scope +async fn blocks() { + let my_string = "foo".to_string(); - let future_one = async { - // ... - println!("{}", my_string); - }; + let future_one = async { + // ... + println!("{}", my_string); + }; - let future_two = async { - // ... - println!("{}", my_string); - }; + let future_two = async { + // ... 
+ println!("{}", my_string); + }; - // Run both futures to completion, printing "foo" twice: - let ((), ()) = futures::join!(future_one, future_two); - } + // Run both futures to completion, printing "foo" twice: + let ((), ()) = futures::join!(future_one, future_two); +} - /// `async move` block: - /// - /// Only one `async move` block can access the same captured variable, since - /// captures are moved into the `Future` generated by the `async move` block. - /// However, this allows the `Future` to outlive the original scope of the - /// variable: - fn move_block() -> impl Future { - let my_string = "foo".to_string(); - async move { - // ... - println!("{}", my_string); - } +/// `async move` block: +/// +/// Only one `async move` block can access the same captured variable, since +/// captures are moved into the `Future` generated by the `async move` block. +/// However, this allows the `Future` to outlive the original scope of the +/// variable: +fn move_block() -> impl Future { + let my_string = "foo".to_string(); + async move { + // ... + println!("{}", my_string); } - // ANCHOR_END: async_move_examples +} +// ANCHOR_END: async_move_examples } diff --git a/examples/05_01_streams/src/lib.rs b/examples/05_01_streams/src/lib.rs index 09261889..d68fc7e3 100644 --- a/examples/05_01_streams/src/lib.rs +++ b/examples/05_01_streams/src/lib.rs @@ -1,57 +1,63 @@ #![cfg(test)] mod stream_trait { - use { - futures::stream::Stream as RealStream, - std::{ - pin::Pin, - task::{Context, Poll}, - }, - }; - - // ANCHOR: stream_trait - trait Stream { - /// The type of the value yielded by the stream. - type Item; - - /// Attempt to resolve the next item in the stream. - /// Returns `Poll::Pending` if not ready, `Poll::Ready(Some(x))` if a value - /// is ready, and `Poll::Ready(None)` if the stream has completed. - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - } - // ANCHOR_END: stream_trait - - // assert that `Stream` matches `RealStream`: - impl Stream for dyn RealStream { - type Item = I; - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - RealStream::poll_next(self, cx) - } +use { + futures::stream::{Stream as RealStream}, + std::{ + pin::Pin, + task::{Context, Poll}, + }, +}; + +// ANCHOR: stream_trait +trait Stream { + /// The type of the value yielded by the stream. + type Item; + + /// Attempt to resolve the next item in the stream. + /// Returns `Poll::Pending` if not ready, `Poll::Ready(Some(x))` if a value + /// is ready, and `Poll::Ready(None)` if the stream has completed. + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) + -> Poll>; +} +// ANCHOR_END: stream_trait + +// assert that `Stream` matches `RealStream`: +impl Stream for dyn RealStream { + type Item = I; + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) + -> Poll> + { + RealStream::poll_next(self, cx) } } +} mod channels { - use futures::{channel::mpsc, prelude::*}; - - // ANCHOR: channels - async fn send_recv() { - const BUFFER_SIZE: usize = 10; - let (mut tx, mut rx) = mpsc::channel::(BUFFER_SIZE); - - tx.send(1).await.unwrap(); - tx.send(2).await.unwrap(); - drop(tx); - - // `StreamExt::next` is similar to `Iterator::next`, but returns a - // type that implements `Future>`. 
- assert_eq!(Some(1), rx.next().await); - assert_eq!(Some(2), rx.next().await); - assert_eq!(None, rx.next().await); - } - // ANCHOR_END: channels +use { + futures::{ + channel::mpsc, + prelude::*, + }, +}; + +// ANCHOR: channels +async fn send_recv() { + const BUFFER_SIZE: usize = 10; + let (mut tx, mut rx) = mpsc::channel::(BUFFER_SIZE); + + tx.send(1).await.unwrap(); + tx.send(2).await.unwrap(); + drop(tx); + + // `StreamExt::next` is similar to `Iterator::next`, but returns a + // type that implements `Future>`. + assert_eq!(Some(1), rx.next().await); + assert_eq!(Some(2), rx.next().await); + assert_eq!(None, rx.next().await); +} +// ANCHOR_END: channels - #[test] - fn run_send_recv() { - futures::executor::block_on(send_recv()) - } +#[test] +fn run_send_recv() { futures::executor::block_on(send_recv()) } } diff --git a/examples/05_02_iteration_and_concurrency/src/lib.rs b/examples/05_02_iteration_and_concurrency/src/lib.rs index 18f2f93a..6eb36a14 100644 --- a/examples/05_02_iteration_and_concurrency/src/lib.rs +++ b/examples/05_02_iteration_and_concurrency/src/lib.rs @@ -5,7 +5,10 @@ use { executor::block_on, stream::{self, Stream}, }, - std::{io, pin::Pin}, + std::{ + io, + pin::Pin, + }, }; // ANCHOR: nexts @@ -52,21 +55,15 @@ async fn jump_around( use futures::stream::TryStreamExt; // for `try_for_each_concurrent` const MAX_CONCURRENT_JUMPERS: usize = 100; - stream - .try_for_each_concurrent(MAX_CONCURRENT_JUMPERS, |num| async move { - jump_n_times(num).await?; - report_n_jumps(num).await?; - Ok(()) - }) - .await?; + stream.try_for_each_concurrent(MAX_CONCURRENT_JUMPERS, |num| async move { + jump_n_times(num).await?; + report_n_jumps(num).await?; + Ok(()) + }).await?; Ok(()) } // ANCHOR_END: try_for_each_concurrent -async fn jump_n_times(_: u8) -> Result<(), io::Error> { - Ok(()) -} -async fn report_n_jumps(_: u8) -> Result<(), io::Error> { - Ok(()) -} +async fn jump_n_times(_: u8) -> Result<(), io::Error> { Ok(()) } +async fn report_n_jumps(_: u8) -> Result<(), io::Error> { Ok(()) } diff --git a/examples/06_02_join/src/lib.rs b/examples/06_02_join/src/lib.rs index 5461db23..b2cd8719 100644 --- a/examples/06_02_join/src/lib.rs +++ b/examples/06_02_join/src/lib.rs @@ -2,89 +2,76 @@ struct Book; struct Music; -async fn get_book() -> Book { - Book -} -async fn get_music() -> Music { - Music -} +async fn get_book() -> Book { Book } +async fn get_music() -> Music { Music } mod naiive { - use super::*; - // ANCHOR: naiive - async fn get_book_and_music() -> (Book, Music) { - let book = get_book().await; - let music = get_music().await; - (book, music) - } - // ANCHOR_END: naiive +use super::*; +// ANCHOR: naiive +async fn get_book_and_music() -> (Book, Music) { + let book = get_book().await; + let music = get_music().await; + (book, music) +} +// ANCHOR_END: naiive } mod other_langs { - use super::*; - // ANCHOR: other_langs - // WRONG -- don't do this - async fn get_book_and_music() -> (Book, Music) { - let book_future = get_book(); - let music_future = get_music(); - (book_future.await, music_future.await) - } - // ANCHOR_END: other_langs +use super::*; +// ANCHOR: other_langs +// WRONG -- don't do this +async fn get_book_and_music() -> (Book, Music) { + let book_future = get_book(); + let music_future = get_music(); + (book_future.await, music_future.await) +} +// ANCHOR_END: other_langs } mod join { - use super::*; - // ANCHOR: join - use futures::join; +use super::*; +// ANCHOR: join +use futures::join; - async fn get_book_and_music() -> (Book, Music) { - let book_fut = get_book(); 
- let music_fut = get_music(); - join!(book_fut, music_fut) - } - // ANCHOR_END: join +async fn get_book_and_music() -> (Book, Music) { + let book_fut = get_book(); + let music_fut = get_music(); + join!(book_fut, music_fut) +} +// ANCHOR_END: join } mod try_join { - use super::{Book, Music}; - // ANCHOR: try_join - use futures::try_join; +use super::{Book, Music}; +// ANCHOR: try_join +use futures::try_join; - async fn get_book() -> Result { - /* ... */ - Ok(Book) - } - async fn get_music() -> Result { - /* ... */ - Ok(Music) - } +async fn get_book() -> Result { /* ... */ Ok(Book) } +async fn get_music() -> Result { /* ... */ Ok(Music) } - async fn get_book_and_music() -> Result<(Book, Music), String> { - let book_fut = get_book(); - let music_fut = get_music(); - try_join!(book_fut, music_fut) - } - // ANCHOR_END: try_join +async fn get_book_and_music() -> Result<(Book, Music), String> { + let book_fut = get_book(); + let music_fut = get_music(); + try_join!(book_fut, music_fut) +} +// ANCHOR_END: try_join } mod mismatched_err { - use super::{Book, Music}; - // ANCHOR: try_join_map_err - use futures::{future::TryFutureExt, try_join}; +use super::{Book, Music}; +// ANCHOR: try_join_map_err +use futures::{ + future::TryFutureExt, + try_join, +}; - async fn get_book() -> Result { - /* ... */ - Ok(Book) - } - async fn get_music() -> Result { - /* ... */ - Ok(Music) - } +async fn get_book() -> Result { /* ... */ Ok(Book) } +async fn get_music() -> Result { /* ... */ Ok(Music) } - async fn get_book_and_music() -> Result<(Book, Music), String> { - let book_fut = get_book().map_err(|()| "Unable to get book".to_string()); - let music_fut = get_music(); - try_join!(book_fut, music_fut) - } - // ANCHOR_END: try_join_map_err +async fn get_book_and_music() -> Result<(Book, Music), String> { + let book_fut = get_book().map_err(|()| "Unable to get book".to_string()); + let music_fut = get_music(); + try_join!(book_fut, music_fut) +} +// ANCHOR_END: try_join_map_err } diff --git a/examples/06_03_select/src/lib.rs b/examples/06_03_select/src/lib.rs index 5a4b7dba..7d79d439 100644 --- a/examples/06_03_select/src/lib.rs +++ b/examples/06_03_select/src/lib.rs @@ -1,193 +1,183 @@ #![cfg(test)] -#![recursion_limit = "128"] +#![recursion_limit="128"] mod example { - // ANCHOR: example - use futures::{ - future::FutureExt, // for `.fuse()` - pin_mut, - select, - }; - - async fn task_one() { /* ... */ - } - async fn task_two() { /* ... */ - } +// ANCHOR: example +use futures::{ + future::FutureExt, // for `.fuse()` + pin_mut, + select, +}; - async fn race_tasks() { - let t1 = task_one().fuse(); - let t2 = task_two().fuse(); +async fn task_one() { /* ... */ } +async fn task_two() { /* ... */ } - pin_mut!(t1, t2); +async fn race_tasks() { + let t1 = task_one().fuse(); + let t2 = task_two().fuse(); - select! { - () = t1 => println!("task one completed first"), - () = t2 => println!("task two completed first"), - } + pin_mut!(t1, t2); + + select! { + () = t1 => println!("task one completed first"), + () = t2 => println!("task two completed first"), } - // ANCHOR_END: example +} +// ANCHOR_END: example } mod default_and_complete { - // ANCHOR: default_and_complete - use futures::{future, select}; - - async fn count() { - let mut a_fut = future::ready(4); - let mut b_fut = future::ready(6); - let mut total = 0; - - loop { - select! 
{ - a = a_fut => total += a, - b = b_fut => total += b, - complete => break, - default => unreachable!(), // never runs (futures are ready, then complete) - }; - } - assert_eq!(total, 10); - } - // ANCHOR_END: default_and_complete +// ANCHOR: default_and_complete +use futures::{future, select}; + +async fn count() { + let mut a_fut = future::ready(4); + let mut b_fut = future::ready(6); + let mut total = 0; - #[test] - fn run_count() { - futures::executor::block_on(count()); + loop { + select! { + a = a_fut => total += a, + b = b_fut => total += b, + complete => break, + default => unreachable!(), // never runs (futures are ready, then complete) + }; } + assert_eq!(total, 10); +} +// ANCHOR_END: default_and_complete + +#[test] +fn run_count() { + futures::executor::block_on(count()); +} } mod fused_stream { - // ANCHOR: fused_stream - use futures::{ - select, - stream::{FusedStream, Stream, StreamExt}, - }; - - async fn add_two_streams( - mut s1: impl Stream + FusedStream + Unpin, - mut s2: impl Stream + FusedStream + Unpin, - ) -> u8 { - let mut total = 0; - - loop { - let item = select! { - x = s1.next() => x, - x = s2.next() => x, - complete => break, - }; - if let Some(next_num) = item { - total += next_num; - } +// ANCHOR: fused_stream +use futures::{ + stream::{Stream, StreamExt, FusedStream}, + select, +}; + +async fn add_two_streams( + mut s1: impl Stream + FusedStream + Unpin, + mut s2: impl Stream + FusedStream + Unpin, +) -> u8 { + let mut total = 0; + + loop { + let item = select! { + x = s1.next() => x, + x = s2.next() => x, + complete => break, + }; + if let Some(next_num) = item { + total += next_num; } - - total } - // ANCHOR_END: fused_stream + + total +} +// ANCHOR_END: fused_stream } mod fuse_terminated { - // ANCHOR: fuse_terminated - use futures::{ - future::{Fuse, FusedFuture, FutureExt}, - pin_mut, select, - stream::{FusedStream, Stream, StreamExt}, - }; - - async fn get_new_num() -> u8 { - /* ... */ - 5 - } - - async fn run_on_new_num(_: u8) { /* ... */ - } - - async fn run_loop( - mut interval_timer: impl Stream + FusedStream + Unpin, - starting_num: u8, - ) { - let run_on_new_num_fut = run_on_new_num(starting_num).fuse(); - let get_new_num_fut = Fuse::terminated(); - pin_mut!(run_on_new_num_fut, get_new_num_fut); - loop { - select! { - () = interval_timer.select_next_some() => { - // The timer has elapsed. Start a new `get_new_num_fut` - // if one was not already running. - if get_new_num_fut.is_terminated() { - get_new_num_fut.set(get_new_num().fuse()); - } - }, - new_num = get_new_num_fut => { - // A new number has arrived-- start a new `run_on_new_num_fut`, - // dropping the old one. - run_on_new_num_fut.set(run_on_new_num(new_num).fuse()); - }, - // Run the `run_on_new_num_fut` - () = run_on_new_num_fut => {}, - // panic if everything completed, since the `interval_timer` should - // keep yielding values indefinitely. - complete => panic!("`interval_timer` completed unexpectedly"), - } +// ANCHOR: fuse_terminated +use futures::{ + future::{Fuse, FusedFuture, FutureExt}, + stream::{FusedStream, Stream, StreamExt}, + pin_mut, + select, +}; + +async fn get_new_num() -> u8 { /* ... */ 5 } + +async fn run_on_new_num(_: u8) { /* ... */ } + +async fn run_loop( + mut interval_timer: impl Stream + FusedStream + Unpin, + starting_num: u8, +) { + let run_on_new_num_fut = run_on_new_num(starting_num).fuse(); + let get_new_num_fut = Fuse::terminated(); + pin_mut!(run_on_new_num_fut, get_new_num_fut); + loop { + select! 
{ + () = interval_timer.select_next_some() => { + // The timer has elapsed. Start a new `get_new_num_fut` + // if one was not already running. + if get_new_num_fut.is_terminated() { + get_new_num_fut.set(get_new_num().fuse()); + } + }, + new_num = get_new_num_fut => { + // A new number has arrived-- start a new `run_on_new_num_fut`, + // dropping the old one. + run_on_new_num_fut.set(run_on_new_num(new_num).fuse()); + }, + // Run the `run_on_new_num_fut` + () = run_on_new_num_fut => {}, + // panic if everything completed, since the `interval_timer` should + // keep yielding values indefinitely. + complete => panic!("`interval_timer` completed unexpectedly"), } } - // ANCHOR_END: fuse_terminated +} +// ANCHOR_END: fuse_terminated } mod futures_unordered { - // ANCHOR: futures_unordered - use futures::{ - future::{Fuse, FusedFuture, FutureExt}, - pin_mut, select, - stream::{FusedStream, FuturesUnordered, Stream, StreamExt}, - }; - - async fn get_new_num() -> u8 { - /* ... */ - 5 - } - - async fn run_on_new_num(_: u8) -> u8 { - /* ... */ - 5 - } - - // Runs `run_on_new_num` with the latest number - // retrieved from `get_new_num`. - // - // `get_new_num` is re-run every time a timer elapses, - // immediately cancelling the currently running - // `run_on_new_num` and replacing it with the newly - // returned value. - async fn run_loop( - mut interval_timer: impl Stream + FusedStream + Unpin, - starting_num: u8, - ) { - let mut run_on_new_num_futs = FuturesUnordered::new(); - run_on_new_num_futs.push(run_on_new_num(starting_num)); - let get_new_num_fut = Fuse::terminated(); - pin_mut!(get_new_num_fut); - loop { - select! { - () = interval_timer.select_next_some() => { - // The timer has elapsed. Start a new `get_new_num_fut` - // if one was not already running. - if get_new_num_fut.is_terminated() { - get_new_num_fut.set(get_new_num().fuse()); - } - }, - new_num = get_new_num_fut => { - // A new number has arrived-- start a new `run_on_new_num_fut`. - run_on_new_num_futs.push(run_on_new_num(new_num)); - }, - // Run the `run_on_new_num_futs` and check if any have completed - res = run_on_new_num_futs.select_next_some() => { - println!("run_on_new_num_fut returned {:?}", res); - }, - // panic if everything completed, since the `interval_timer` should - // keep yielding values indefinitely. - complete => panic!("`interval_timer` completed unexpectedly"), - } +// ANCHOR: futures_unordered +use futures::{ + future::{Fuse, FusedFuture, FutureExt}, + stream::{FusedStream, FuturesUnordered, Stream, StreamExt}, + pin_mut, + select, +}; + +async fn get_new_num() -> u8 { /* ... */ 5 } + +async fn run_on_new_num(_: u8) -> u8 { /* ... */ 5 } + +// Runs `run_on_new_num` with the latest number +// retrieved from `get_new_num`. +// +// `get_new_num` is re-run every time a timer elapses, +// immediately cancelling the currently running +// `run_on_new_num` and replacing it with the newly +// returned value. +async fn run_loop( + mut interval_timer: impl Stream + FusedStream + Unpin, + starting_num: u8, +) { + let mut run_on_new_num_futs = FuturesUnordered::new(); + run_on_new_num_futs.push(run_on_new_num(starting_num)); + let get_new_num_fut = Fuse::terminated(); + pin_mut!(get_new_num_fut); + loop { + select! { + () = interval_timer.select_next_some() => { + // The timer has elapsed. Start a new `get_new_num_fut` + // if one was not already running. 
+ if get_new_num_fut.is_terminated() { + get_new_num_fut.set(get_new_num().fuse()); + } + }, + new_num = get_new_num_fut => { + // A new number has arrived-- start a new `run_on_new_num_fut`. + run_on_new_num_futs.push(run_on_new_num(new_num)); + }, + // Run the `run_on_new_num_futs` and check if any have completed + res = run_on_new_num_futs.select_next_some() => { + println!("run_on_new_num_fut returned {:?}", res); + }, + // panic if everything completed, since the `interval_timer` should + // keep yielding values indefinitely. + complete => panic!("`interval_timer` completed unexpectedly"), } } +} - // ANCHOR_END: futures_unordered +// ANCHOR_END: futures_unordered } diff --git a/examples/07_05_recursion/src/lib.rs b/examples/07_05_recursion/src/lib.rs index 10e8c8b4..7811cfb3 100644 --- a/examples/07_05_recursion/src/lib.rs +++ b/examples/07_05_recursion/src/lib.rs @@ -8,7 +8,6 @@ fn recursive() -> BoxFuture<'static, ()> { async move { recursive().await; recursive().await; - } - .boxed() + }.boxed() } // ANCHOR_END: example From 57093a679a3364e1de69b4f59a497d94b8de6867 Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Fri, 4 Sep 2020 18:47:25 -0400 Subject: [PATCH 07/12] Revert formatting for example 2 --- examples/02_04_executor/src/lib.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/examples/02_04_executor/src/lib.rs b/examples/02_04_executor/src/lib.rs index 58ed7b7a..e5120cd9 100644 --- a/examples/02_04_executor/src/lib.rs +++ b/examples/02_04_executor/src/lib.rs @@ -3,13 +3,13 @@ // ANCHOR: imports use { futures::{ - future::{FutureExt, BoxFuture}, - task::{ArcWake, waker_ref}, + future::{BoxFuture, FutureExt}, + task::{waker_ref, ArcWake}, }, std::{ future::Future, + sync::mpsc::{sync_channel, Receiver, SyncSender}, sync::{Arc, Mutex}, - sync::mpsc::{sync_channel, SyncSender, Receiver}, task::{Context, Poll}, time::Duration, }, @@ -74,7 +74,10 @@ impl ArcWake for Task { // Implement `wake` by sending this task back onto the task channel // so that it will be polled again by the executor. 
let cloned = arc_self.clone(); - arc_self.task_sender.send(cloned).expect("too many tasks queued"); + arc_self + .task_sender + .send(cloned) + .expect("too many tasks queued"); } } // ANCHOR_END: arcwake_for_task @@ -128,4 +131,6 @@ fn main() { // ANCHOR_END: main #[test] -fn run_main() { main() } +fn run_main() { + main() +} From 2a77cbd47ab0275f7bfd09899a6ce11ed349602b Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Thu, 10 Sep 2020 17:20:29 -0400 Subject: [PATCH 08/12] Introduce concurrency before multithreading --- ci/dictionary.txt | 3 ++ .../08_03_concurrent_tcp_server/src/main.rs | 19 ------- .../Cargo.toml | 2 +- examples/08_03_slow_request/src/main.rs | 40 ++++++++++++++ .../Cargo.toml | 3 ++ .../08_04_concurrent_tcp_server/src/main.rs | 31 +++++++++++ .../08_05_multithreaded_tcp_server/Cargo.toml | 11 ++++ .../src/main.rs | 2 - .../Cargo.toml | 0 .../hello.html | 0 .../src/main.rs | 2 +- examples/Cargo.toml | 7 +-- src/08_example/00_intro.md | 1 - src/08_example/01_running_async_code.md | 16 +++++- .../02_handling_connections_concurrently.md | 54 ++++++++++--------- src/08_example/03_combinators.md | 23 +++----- src/08_example/04_multithreading.md | 26 +++++++++ src/08_example/{04_tests.md => 05_tests.md} | 8 +-- src/SUMMARY.md | 3 +- 19 files changed, 177 insertions(+), 74 deletions(-) delete mode 100644 examples/08_03_concurrent_tcp_server/src/main.rs rename examples/{08_04_nonblocking_tcp_server => 08_03_slow_request}/Cargo.toml (88%) create mode 100644 examples/08_03_slow_request/src/main.rs rename examples/{08_03_concurrent_tcp_server => 08_04_concurrent_tcp_server}/Cargo.toml (89%) create mode 100644 examples/08_04_concurrent_tcp_server/src/main.rs create mode 100644 examples/08_05_multithreaded_tcp_server/Cargo.toml rename examples/{08_04_nonblocking_tcp_server => 08_05_multithreaded_tcp_server}/src/main.rs (91%) rename examples/{08_05_final_tcp_server => 08_06_final_tcp_server}/Cargo.toml (100%) rename examples/{08_05_final_tcp_server => 08_06_final_tcp_server}/hello.html (100%) rename examples/{08_05_final_tcp_server => 08_06_final_tcp_server}/src/main.rs (98%) create mode 100644 src/08_example/04_multithreading.md rename src/08_example/{04_tests.md => 05_tests.md} (90%) diff --git a/ci/dictionary.txt b/ci/dictionary.txt index 89bfdc77..64e74c3e 100644 --- a/ci/dictionary.txt +++ b/ci/dictionary.txt @@ -24,6 +24,7 @@ FutOne FutTwo FuturesUnordered GenFuture +gRPC html http Hyper's @@ -35,6 +36,7 @@ IOCP IoObject kqueue localhost +LocalExecutor metadata MockTcpStream multithreaded @@ -65,6 +67,7 @@ runtimes rustc rustup SimpleFuture +smol SocketRead SomeType spawner diff --git a/examples/08_03_concurrent_tcp_server/src/main.rs b/examples/08_03_concurrent_tcp_server/src/main.rs deleted file mode 100644 index 56f9b2d5..00000000 --- a/examples/08_03_concurrent_tcp_server/src/main.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::net::TcpListener; -use std::net::TcpStream; - -// ANCHOR: main_func -use async_std::task::spawn; - -#[async_std::main] -async fn main() { - let listener = TcpListener::bind("127.0.0.1:7878").unwrap(); - for stream in listener.incoming() { - let stream = stream.unwrap(); - spawn(handle_connection(stream)); - } -} -// ANCHOR_END: main_func - -async fn handle_connection(mut stream: TcpStream) { - //<-- snip --> -} diff --git a/examples/08_04_nonblocking_tcp_server/Cargo.toml b/examples/08_03_slow_request/Cargo.toml similarity index 88% rename from examples/08_04_nonblocking_tcp_server/Cargo.toml rename to examples/08_03_slow_request/Cargo.toml index 
9a4e7b97..5a846ab0 100644 --- a/examples/08_04_nonblocking_tcp_server/Cargo.toml +++ b/examples/08_03_slow_request/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "nonblocking_tcp_server" +name = "slow_request" version = "0.1.0" authors = ["Your Name + stream.write(response.as_bytes()).await.unwrap(); + stream.flush().await.unwrap(); +} +// ANCHOR_END: handle_connection diff --git a/examples/08_05_multithreaded_tcp_server/Cargo.toml b/examples/08_05_multithreaded_tcp_server/Cargo.toml new file mode 100644 index 00000000..c917492f --- /dev/null +++ b/examples/08_05_multithreaded_tcp_server/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "multithreaded_tcp_server" +version = "0.1.0" +authors = ["Your Name Date: Mon, 14 Sep 2020 19:23:54 -0400 Subject: [PATCH 09/12] Address second round of comments from Didrik --- ci/dictionary.txt | 1 + .../08_04_concurrent_tcp_server/Cargo.toml | 2 +- .../08_04_concurrent_tcp_server/src/main.rs | 6 +-- .../Cargo.toml | 0 .../hello.html | 0 .../src/main.rs | 54 ++++--------------- .../08_05_multithreaded_tcp_server/Cargo.toml | 11 ---- .../src/main.rs | 26 --------- examples/Cargo.toml | 3 +- src/08_example/01_running_async_code.md | 13 +++-- .../02_handling_connections_concurrently.md | 18 ++++--- src/08_example/03_combinators.md | 27 ---------- ...multithreading.md => 03_multithreading.md} | 15 +++--- src/08_example/{05_tests.md => 04_tests.md} | 26 ++++----- src/SUMMARY.md | 5 +- 15 files changed, 58 insertions(+), 149 deletions(-) rename examples/{08_06_final_tcp_server => 08_05_final_tcp_server}/Cargo.toml (100%) rename examples/{08_06_final_tcp_server => 08_05_final_tcp_server}/hello.html (100%) rename examples/{08_06_final_tcp_server => 08_05_final_tcp_server}/src/main.rs (74%) delete mode 100644 examples/08_05_multithreaded_tcp_server/Cargo.toml delete mode 100644 examples/08_05_multithreaded_tcp_server/src/main.rs delete mode 100644 src/08_example/03_combinators.md rename src/08_example/{04_multithreading.md => 03_multithreading.md} (62%) rename src/08_example/{05_tests.md => 04_tests.md} (70%) diff --git a/ci/dictionary.txt b/ci/dictionary.txt index 64e74c3e..d556683f 100644 --- a/ci/dictionary.txt +++ b/ci/dictionary.txt @@ -57,6 +57,7 @@ proxying pseudocode ReadIntoBuf recognise +refactor RefCell repurposed requeue diff --git a/examples/08_04_concurrent_tcp_server/Cargo.toml b/examples/08_04_concurrent_tcp_server/Cargo.toml index dabf40cf..b4afb235 100644 --- a/examples/08_04_concurrent_tcp_server/Cargo.toml +++ b/examples/08_04_concurrent_tcp_server/Cargo.toml @@ -11,4 +11,4 @@ futures = "0.3" [dependencies.async-std] version = "1.6" -features = ["attributes"] \ No newline at end of file +features = ["attributes"] diff --git a/examples/08_04_concurrent_tcp_server/src/main.rs b/examples/08_04_concurrent_tcp_server/src/main.rs index 59e809e2..0cf19bfd 100644 --- a/examples/08_04_concurrent_tcp_server/src/main.rs +++ b/examples/08_04_concurrent_tcp_server/src/main.rs @@ -8,9 +8,9 @@ async fn main() { let listener = TcpListener::bind("127.0.0.1:7878").await.unwrap(); listener .incoming() - .for_each_concurrent(/* limit */ None, |stream| async move { - let stream = stream.unwrap(); - handle_connection(stream).await; + .for_each_concurrent(/* limit */ None, |tcpstream| async move { + let tcpstream = tcpstream.unwrap(); + handle_connection(tcpstream).await; }) .await; } diff --git a/examples/08_06_final_tcp_server/Cargo.toml b/examples/08_05_final_tcp_server/Cargo.toml similarity index 100% rename from examples/08_06_final_tcp_server/Cargo.toml rename to 
examples/08_05_final_tcp_server/Cargo.toml diff --git a/examples/08_06_final_tcp_server/hello.html b/examples/08_05_final_tcp_server/hello.html similarity index 100% rename from examples/08_06_final_tcp_server/hello.html rename to examples/08_05_final_tcp_server/hello.html diff --git a/examples/08_06_final_tcp_server/src/main.rs b/examples/08_05_final_tcp_server/src/main.rs similarity index 74% rename from examples/08_06_final_tcp_server/src/main.rs rename to examples/08_05_final_tcp_server/src/main.rs index e936cb7e..81f98c8c 100644 --- a/examples/08_06_final_tcp_server/src/main.rs +++ b/examples/08_05_final_tcp_server/src/main.rs @@ -1,21 +1,24 @@ use std::fs; -use std::time::{Duration, Instant}; -use futures::join; +use futures::stream::StreamExt; use async_std::net::TcpListener; use async_std::prelude::*; +// ANCHOR: main_func use async_std::task::spawn; #[async_std::main] async fn main() { let listener = TcpListener::bind("127.0.0.1:7878").await.unwrap(); - - loop { - let (stream, _) = listener.accept().await.unwrap(); - spawn(handle_connection(stream)); - } + listener + .incoming() + .for_each_concurrent(/* limit */ None, |stream| async move { + let stream = stream.unwrap(); + spawn(handle_connection(stream)); + }) + .await; } +// ANCHOR_END: main_func use async_std::io::{Read, Write}; use std::marker::Unpin; @@ -35,43 +38,6 @@ async fn handle_connection(mut stream: impl Read + Write + Unpin) { stream.flush().await.unwrap(); } -// ANCHOR: slow_functions -use async_std::task::sleep; - -async fn write_to_database() { - // Simulate a slow request - sleep(Duration::from_secs(2)).await; -} - -async fn add_to_queue() { - // Simulate a slow request - sleep(Duration::from_secs(3)).await; -} -// ANCHOR_END: slow_functions - -async fn foo() { - // ANCHOR: serial_execution - let now = Instant::now(); - write_to_database().await; - add_to_queue().await; - println!( - "Write to database + add to queue took {} seconds", - now.elapsed().as_secs() - ); - // ANCHOR_END: serial_execution -} - -async fn bar() { - // ANCHOR: parallel_execution - let now = Instant::now(); - join!(write_to_database(), add_to_queue()); - println!( - "Write to database + add to queue took {} seconds", - now.elapsed().as_secs() - ); - // ANCHOR_END: parallel_execution -} - #[cfg(test)] mod tests { diff --git a/examples/08_05_multithreaded_tcp_server/Cargo.toml b/examples/08_05_multithreaded_tcp_server/Cargo.toml deleted file mode 100644 index c917492f..00000000 --- a/examples/08_05_multithreaded_tcp_server/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "multithreaded_tcp_server" -version = "0.1.0" -authors = ["Your Name - stream.write(response.as_bytes()).await.unwrap(); - stream.flush().await.unwrap(); -} diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 5fdf829c..939a0cb1 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -15,6 +15,5 @@ members = [ "08_02_async_tcp_server", "08_03_slow_request", "08_04_concurrent_tcp_server", - "08_05_multithreaded_tcp_server", - "08_06_final_tcp_server", + "08_05_final_tcp_server", ] diff --git a/src/08_example/01_running_async_code.md b/src/08_example/01_running_async_code.md index 225eb6ed..63839085 100644 --- a/src/08_example/01_running_async_code.md +++ b/src/08_example/01_running_async_code.md @@ -36,6 +36,7 @@ Please consult the section on choosing a runtime for more information on asynchr [//]: <> (TODO: Link to section on runtimes once complete.) +## Adding an Async Runtime Here, we'll use an executor from the `async-std` crate. 
The `#[async_std::main]` attribute from `async-std` allows us to write an asynchronous main function. To use it, enable the `attributes` feature of `async-std` in `Cargo.toml`: @@ -45,13 +46,17 @@ version = "1.6" features = ["attributes"] ``` -It might be tempting to write something like this: +As a first step, we'll swap to the asynchronous main function, +and `await` the future returned by the async version of `handle_connection`. +Then, we'll test how the server responds. +Here's what that would look like: ```rust {{#include ../../examples/08_02_async_tcp_server/src/main.rs:main_func}} ``` +Now, let's test to see if our server can handle connections concurrently. +Simply making `handle_connection` asynchronous doesn't mean that the server +can handle multiple connections at the same time, and we'll soon see why. -However, just because this program uses an asynchronous connection handler -doesn't mean that it handles connections concurrently. To illustrate this, let's simulate a slow request. When a client makes a request to `127.0.0.1:7878/sleep`, our server will sleep for 5 seconds: @@ -62,7 +67,7 @@ our server will sleep for 5 seconds: This is very similar to the [simulation of a slow request](https://doc.rust-lang.org/book/ch20-02-multithreaded.html#simulating-a-slow-request-in-the-current-server-implementation) from the Book, but with one important difference: -we're using the non-blocking `async_std::task::sleep` instead of `std::thread::sleep`, which blocks. +we're using the non-blocking function `async_std::task::sleep` instead of the blocking function `std::thread::sleep`. It's important to remember that even if a piece of code is run within an `async fn` and `await`ed, it may still block. To test whether our server handles connections concurrently, we'll need to ensure that `handle_connection` is non-blocking. diff --git a/src/08_example/02_handling_connections_concurrently.md b/src/08_example/02_handling_connections_concurrently.md index ece0021f..62175f5f 100644 --- a/src/08_example/02_handling_connections_concurrently.md +++ b/src/08_example/02_handling_connections_concurrently.md @@ -7,23 +7,25 @@ In order to fix this, we'll transform `listener.incoming()` from a blocking Iter to a non-blocking Stream. Streams are similar to Iterators, but can be consumed asynchronously. For more information, see the [chapter on Streams](../05_streams/01_chapter.md). -First, let's replace our blocking `std::net::TcpListener` with the non-blocking `async_std::net::TcpListener`, +Let's replace our blocking `std::net::TcpListener` with the non-blocking `async_std::net::TcpListener`, and update our connection handler to accept an `async_std::net::TcpStream`: ```rust,ignore {{#include ../../examples/08_04_concurrent_tcp_server/src/main.rs:handle_connection}} ``` -This change provides two benefits. +The asynchronous version of `TcpListener` implements the `Stream` trait for `listener.incoming()`, +a change which provides two benefits. The first is that `listener.incoming()` no longer blocks the executor. The executor can now yield to other pending futures while there are no incoming TCP connections to be processed. -The second benefit is that `listener.incoming()` now implements the `Stream` trait, -allowing us to handle each connection concurrently using its `for_each_concurrent` method. +The second benefit is that elements from the Stream can optionally be processed concurrently, +using a Stream's `for_each_concurrent` method. 
+Here, we'll take advantage of this method to handle each incoming request concurrently. We'll need to import the `Stream` trait from the `futures` crate, so our Cargo.toml now looks like this: -```toml -[dependencies] -futures = "0.3" +```diff ++[dependencies] ++futures = "0.3" [dependencies.async-std] version = "1.6" @@ -31,7 +33,7 @@ features = ["attributes"] ``` Now, we can handle each connection concurrently by passing `handle_connection` in through a closure function. -The closure function is run as soon as items in the stream become available. +The closure function takes ownership of each `TcpStream`, and is run as soon as a new `TcpStream` becomes available. As long as `handle_connection` does not block, a slow request will no longer prevent other requests from completing. ```rust,ignore {{#include ../../examples/08_04_concurrent_tcp_server/src/main.rs:main_func}} diff --git a/src/08_example/03_combinators.md b/src/08_example/03_combinators.md deleted file mode 100644 index e5e556f3..00000000 --- a/src/08_example/03_combinators.md +++ /dev/null @@ -1,27 +0,0 @@ -# Multiple Concurrent Actions Per Request -Imagine we wanted to perform some more tasks with each incoming TCP connection. -For example, we might want to write information about the request to a database, -or put some data from the request onto a queue for processing. -Both of these actions can block, meaning that running them asynchronously will likely improve performance. - -Let's modify the [simulated slow request](../08_example/01_running_async_code.md) from earlier in the example, -breaking it into multiple slow function calls: -```rust,ignore -{{#include ../../examples/08_06_final_tcp_server/src/main.rs:slow_functions}} -``` -Again, we're using the non-blocking function `async_std::task::sleep` instead of `std::thread::sleep`, which blocks. - -Now, let's run `write_to_database` and `add_to_queue` within `handle_connection`: -```rust,ignore -{{#include ../../examples/08_06_final_tcp_server/src/main.rs:serial_execution}} -``` - -If you run this code, you'll see "Write to database + add to queue took 5 seconds" printed to the console. -The request took 5 seconds because the program can only add to the queue once writing to the database has completed. - -To run these two asynchronous functions concurrently, we can use the `join` combinator from the `futures` crate: -```rust,ignore -{{#include ../../examples/08_06_final_tcp_server/src/main.rs:parallel_execution}} -``` -Handling a request will now take only 3 seconds. We've successfully run two concurrent tasks! -Please see the [section on combinators](../06_multiple_futures/01_chapter.md) for more information and examples. diff --git a/src/08_example/04_multithreading.md b/src/08_example/03_multithreading.md similarity index 62% rename from src/08_example/04_multithreading.md rename to src/08_example/03_multithreading.md index 75bccc5e..ff2ffd04 100644 --- a/src/08_example/04_multithreading.md +++ b/src/08_example/03_multithreading.md @@ -1,4 +1,4 @@ -# Adding Parallelism +# Serving Requests in Parallel Our example so far has largely presented concurrency (using async code) as an alternative to parallelism (using threads). However, async code and threads are not mutually exclusive. @@ -6,21 +6,20 @@ Async executors can be either single-threaded or multithreaded. For example, the [`async-executor` crate](https://docs.rs/async-executor) used by `async-std` has both a single-threaded `LocalExecutor` and a multi-threaded `Executor`. 
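The paragraphs that follow discuss spawning tasks onto an executor's worker threads. As a standalone sketch (a toy computation, not the server code), spawning with `async_std::task::spawn` and awaiting the resulting `JoinHandle` looks roughly like this:

```rust
use async_std::task;

fn main() {
    task::block_on(async {
        // `spawn` hands the future to async-std's default multi-threaded
        // executor, which may run it on another worker thread. The returned
        // `JoinHandle` is itself a future resolving to the task's output.
        let handle = task::spawn(async { 1 + 2 });
        assert_eq!(handle.await, 3);
    });
}
```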
-Tasks do not always need to be run on the thread that created them, -and async runtimes often include functionality for spawning tasks onto separate threads. +Tasks can either be run on the thread that created them or on a separate thread. +Async runtimes often provide functionality for spawning tasks onto separate threads. Even if tasks are executed on separate threads, they should still be non-blocking. -Some libraries provide functions for spawning blocking tasks onto separate threads, +Some runtimes provide functions for spawning blocking tasks onto separate threads, which is useful for running synchronous code from other libraries. Tasks are usually required to be `Send`, so they can be moved to separate threads. -Some libraries also provide functions for spawning non-`Send` tasks onto a thread-local executor. -Both Tokio and async-std have `task::spawn_blocking` and `task::spawn_local` functions, -although the async-std versions are unstable. +Some runtimes also provide functions for spawning non-`Send` tasks onto a thread-local executor. In our example, `for_each_concurrent` processes each connection concurrently on the same thread as the `main` function. Here, `handle_connection` is both `Send` and non-blocking, so we could have instead spawned new tasks to run `handle_connection`. We can use `async_std::task::spawn` for this purpose: ```rust -{{#include ../../examples/08_05_multithreaded_tcp_server/src/main.rs:main_func}} +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:main_func}} ``` +Now we are using both concurrency and parallelism to handle multiple requests at the same time. diff --git a/src/08_example/05_tests.md b/src/08_example/04_tests.md similarity index 70% rename from src/08_example/05_tests.md rename to src/08_example/04_tests.md index 0e7648b2..524e85c2 100644 --- a/src/08_example/05_tests.md +++ b/src/08_example/04_tests.md @@ -1,18 +1,20 @@ # Testing the TCP Server Let's move on to testing our `handle_connection` function. -First, we need a `TcpStream` to work with, but we don't want to make a real TCP connection in test code. -We could work around this in a few ways. -One strategy could be to refactor the code to be more modular, -and only test that the correct responses are returned for the respective inputs. - -Another strategy is to connect to `localhost` on port 0. +First, we need a `TcpStream` to work with. +In an end-to-end or integration test, we might want a to make a real TCP connection +to test our code. +One strategy for doing this could be to connect to `localhost` on port 0. Port 0 isn't a valid UNIX port, but it'll work for testing. The operating system will return a connection on any open TCP port. -Instead of those strategies, we'll change the signature of `handle_connection` to make it easier to test. +Instead, in this example we'll write a unit test for the connection handler, +to check that the correct responses are returned for the respective inputs. +To keep our unit test isolated and deterministic, we'll replace the `TcpStream` with a mock. + +First, we'll change the signature of `handle_connection` to make it easier to test. `handle_connection` doesn't actually require an `async_std::net::TcpStream`; it requires any struct that implements `async_std::io::Read`, `async_std::io::Write`, and `marker::Unpin`. -Changing the type signature to reflect this allows us to pass a mock for testing instead of a TcpStream. +Changing the type signature to reflect this allows us to pass a mock for testing. 
```rust,ignore use std::marker::Unpin; use async_std::io::{Read, Write}; @@ -25,7 +27,7 @@ First, let's implement the `Read` trait, with one method, `poll_read`. Our mock `TcpStream` will contain some data that is copied into the read buffer, and we'll return `Poll::Ready` to signify that the read is complete. ```rust,ignore -{{#include ../../examples/08_06_final_tcp_server/src/main.rs:mock_read}} +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:mock_read}} ``` Our implementation of `Write` is very similar, @@ -34,13 +36,13 @@ although we'll need to write three methods: `poll_write`, `poll_flush`, and `pol No work needs to be done to flush or close the mock `TcpStream`, so `poll_flush` and `poll_close` can just return `Poll::Ready`. ```rust,ignore -{{#include ../../examples/08_06_final_tcp_server/src/main.rs:mock_write}} +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:mock_write}} ``` Lastly, our mock will need to implement `Unpin`, signifying that its location in memory can safely be moved. For more information on pinning and the `Unpin` trait, see the [section on pinning](../04_pinning/01_chapter.md). ```rust,ignore -{{#include ../../examples/08_06_final_tcp_server/src/main.rs:unpin}} +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:unpin}} ``` Now we're ready to test the `handle_connection` function. @@ -49,5 +51,5 @@ we can run `handle_connection` using the attribute `#[async_std::test]`, similar To ensure that `handle_connection` works as intended, we'll check that the correct data was written to the `MockTcpStream` based on its initial contents. ```rust,ignore -{{#include ../../examples/08_06_final_tcp_server/src/main.rs:test}} +{{#include ../../examples/08_05_final_tcp_server/src/main.rs:test}} ``` \ No newline at end of file diff --git a/src/SUMMARY.md b/src/SUMMARY.md index 526d899c..54b03bf2 100644 --- a/src/SUMMARY.md +++ b/src/SUMMARY.md @@ -28,9 +28,8 @@ - [Final Project: HTTP Server](08_example/00_intro.md) - [Running Asynchronous Code](08_example/01_running_async_code.md) - [Handling Connections Concurrently](08_example/02_handling_connections_concurrently.md) - - [Multiple Actions Per Request](08_example/03_combinators.md) - - [Adding Parallelism](08_example/04_multithreading.md) - - [Testing the Server](08_example/05_tests.md) + - [Adding Parallelism](08_example/03_multithreading.md) + - [Testing the Server](08_example/04_tests.md) - [TODO: I/O](404.md) - [TODO: `AsyncRead` and `AsyncWrite`](404.md) - [TODO: Asynchronous Design Patterns: Solutions and Suggestions](404.md) From b9d975f2993f6dfa40426f5ccb3d59c23c25786e Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Mon, 14 Sep 2020 21:25:58 -0400 Subject: [PATCH 10/12] Formatting and spellcheck --- ci/dictionary.txt | 1 + examples/08_03_slow_request/Cargo.toml | 2 +- examples/08_05_final_tcp_server/Cargo.toml | 2 +- src/08_example/01_running_async_code.md | 13 ++++++++----- src/SUMMARY.md | 2 +- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/ci/dictionary.txt b/ci/dictionary.txt index d556683f..13c52ec2 100644 --- a/ci/dictionary.txt +++ b/ci/dictionary.txt @@ -39,6 +39,7 @@ localhost LocalExecutor metadata MockTcpStream +multi multithreaded multithreading Mutex diff --git a/examples/08_03_slow_request/Cargo.toml b/examples/08_03_slow_request/Cargo.toml index 5a846ab0..aff14020 100644 --- a/examples/08_03_slow_request/Cargo.toml +++ b/examples/08_03_slow_request/Cargo.toml @@ -8,4 +8,4 @@ edition = "2018" [dependencies.async-std] version = "1.6" -features = 
["attributes"] \ No newline at end of file +features = ["attributes"] diff --git a/examples/08_05_final_tcp_server/Cargo.toml b/examples/08_05_final_tcp_server/Cargo.toml index 0bf0e9c9..cb84f91f 100644 --- a/examples/08_05_final_tcp_server/Cargo.toml +++ b/examples/08_05_final_tcp_server/Cargo.toml @@ -11,4 +11,4 @@ futures = "0.3" [dependencies.async-std] version = "1.6" -features = ["attributes"] \ No newline at end of file +features = ["attributes"] diff --git a/src/08_example/01_running_async_code.md b/src/08_example/01_running_async_code.md index 63839085..9597232d 100644 --- a/src/08_example/01_running_async_code.md +++ b/src/08_example/01_running_async_code.md @@ -1,7 +1,9 @@ # Running Asynchronous Code -An HTTP server should be able to serve multiple clients concurrently. -Waiting for previous requests to complete before handling the current request can cause performance to suffer. -The book [solves this problem](https://doc.rust-lang.org/book/ch20-02-multithreaded.html#turning-our-single-threaded-server-into-a-multithreaded-server) by creating a thread pool where each connection is handled on its own thread. +An HTTP server should be able to serve multiple clients concurrently; +that is, it should not wait for previous requests to complete before handling the current request. +The book +[solves this problem](https://doc.rust-lang.org/book/ch20-02-multithreaded.html#turning-our-single-threaded-server-into-a-multithreaded-server) +by creating a thread pool where each connection is handled on its own thread. Here, instead of improving throughput by adding threads, we'll achieve the same effect using asynchronous code. Let's modify `handle_connection` to return a future by declaring it an `async fn`: @@ -71,7 +73,8 @@ we're using the non-blocking function `async_std::task::sleep` instead of the bl It's important to remember that even if a piece of code is run within an `async fn` and `await`ed, it may still block. To test whether our server handles connections concurrently, we'll need to ensure that `handle_connection` is non-blocking. -If you run the server, you'll see that a request to `127.0.0.1:7878/sleep` will block any other incoming requests for 5 seconds! +If you run the server, you'll see that a request to `127.0.0.1:7878/sleep` +will block any other incoming requests for 5 seconds! This is because there are no other concurrent tasks that can make progress while we are `await`ing the result of `handle_connection`. -We'll see how to avoid this in the next section. \ No newline at end of file +In the next section, we'll see how to use async code to handle connections concurrently. 
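To make that failure mode concrete, here is a self-contained sketch of the "asynchronous but still serial" server shape discussed above, assuming the `async-std` dependency with the `attributes` feature and with a placeholder handler standing in for the chapter's real `handle_connection`:

```rust
use async_std::task;
use std::net::{TcpListener, TcpStream};
use std::time::Duration;

// Placeholder handler: pretend every request is slow.
async fn handle_connection(_stream: TcpStream) {
    task::sleep(Duration::from_secs(5)).await;
}

#[async_std::main]
async fn main() {
    let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
    for stream in listener.incoming() {
        // Awaiting the handler to completion here means no other connection
        // is accepted until this one finishes, so one slow request delays
        // everything queued behind it.
        handle_connection(stream.unwrap()).await;
    }
}
```

Note that the blocking `std::net::TcpListener` is also still in play in this sketch; swapping in the non-blocking listener and stream is exactly what the next section does.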
diff --git a/src/SUMMARY.md b/src/SUMMARY.md index 54b03bf2..725b96aa 100644 --- a/src/SUMMARY.md +++ b/src/SUMMARY.md @@ -28,7 +28,7 @@ - [Final Project: HTTP Server](08_example/00_intro.md) - [Running Asynchronous Code](08_example/01_running_async_code.md) - [Handling Connections Concurrently](08_example/02_handling_connections_concurrently.md) - - [Adding Parallelism](08_example/03_multithreading.md) + - [Serving Requests in Parallel](08_example/03_multithreading.md) - [Testing the Server](08_example/04_tests.md) - [TODO: I/O](404.md) - [TODO: `AsyncRead` and `AsyncWrite`](404.md) From 61efda5aa147502c3ef42b3fa806c851218fb8a4 Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Wed, 16 Sep 2020 16:29:02 -0400 Subject: [PATCH 11/12] Apply suggestions from code review Co-authored-by: Tyler Mandry --- src/08_example/01_running_async_code.md | 2 +- src/08_example/03_multithreading.md | 2 +- src/08_example/04_tests.md | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/08_example/01_running_async_code.md b/src/08_example/01_running_async_code.md index 9597232d..ad05ab56 100644 --- a/src/08_example/01_running_async_code.md +++ b/src/08_example/01_running_async_code.md @@ -48,7 +48,7 @@ version = "1.6" features = ["attributes"] ``` -As a first step, we'll swap to the asynchronous main function, +As a first step, we'll switch to an asynchronous main function, and `await` the future returned by the async version of `handle_connection`. Then, we'll test how the server responds. Here's what that would look like: diff --git a/src/08_example/03_multithreading.md b/src/08_example/03_multithreading.md index ff2ffd04..8097f1a1 100644 --- a/src/08_example/03_multithreading.md +++ b/src/08_example/03_multithreading.md @@ -10,7 +10,7 @@ Tasks can either be run on the thread that created them or on a separate thread. Async runtimes often provide functionality for spawning tasks onto separate threads. Even if tasks are executed on separate threads, they should still be non-blocking. -Some runtimes provide functions for spawning blocking tasks onto separate threads, +Some runtimes provide functions for spawning blocking tasks onto dedicated threads, which is useful for running synchronous code from other libraries. Tasks are usually required to be `Send`, so they can be moved to separate threads. Some runtimes also provide functions for spawning non-`Send` tasks onto a thread-local executor. diff --git a/src/08_example/04_tests.md b/src/08_example/04_tests.md index 524e85c2..e0a87962 100644 --- a/src/08_example/04_tests.md +++ b/src/08_example/04_tests.md @@ -1,7 +1,8 @@ # Testing the TCP Server Let's move on to testing our `handle_connection` function. + First, we need a `TcpStream` to work with. -In an end-to-end or integration test, we might want a to make a real TCP connection +In an end-to-end or integration test, we might want to make a real TCP connection to test our code. One strategy for doing this could be to connect to `localhost` on port 0. Port 0 isn't a valid UNIX port, but it'll work for testing. @@ -52,4 +53,4 @@ To ensure that `handle_connection` works as intended, we'll check that the corre was written to the `MockTcpStream` based on its initial contents. 
```rust,ignore {{#include ../../examples/08_05_final_tcp_server/src/main.rs:test}} -``` \ No newline at end of file +``` From 6352134c2d42f99773ec4f38c0cc66d29e72d881 Mon Sep 17 00:00:00 2001 From: Lee Bernick Date: Wed, 16 Sep 2020 16:32:01 -0400 Subject: [PATCH 12/12] Update in response to comments from Tyler --- src/08_example/02_handling_connections_concurrently.md | 6 +++--- src/08_example/04_tests.md | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/08_example/02_handling_connections_concurrently.md b/src/08_example/02_handling_connections_concurrently.md index 62175f5f..2fc9aa50 100644 --- a/src/08_example/02_handling_connections_concurrently.md +++ b/src/08_example/02_handling_connections_concurrently.md @@ -27,9 +27,9 @@ We'll need to import the `Stream` trait from the `futures` crate, so our Cargo.t +[dependencies] +futures = "0.3" -[dependencies.async-std] -version = "1.6" -features = ["attributes"] + [dependencies.async-std] + version = "1.6" + features = ["attributes"] ``` Now, we can handle each connection concurrently by passing `handle_connection` in through a closure function. diff --git a/src/08_example/04_tests.md b/src/08_example/04_tests.md index 524e85c2..702f9814 100644 --- a/src/08_example/04_tests.md +++ b/src/08_example/04_tests.md @@ -3,9 +3,9 @@ Let's move on to testing our `handle_connection` function. First, we need a `TcpStream` to work with. In an end-to-end or integration test, we might want a to make a real TCP connection to test our code. -One strategy for doing this could be to connect to `localhost` on port 0. +One strategy for doing this is to start a listener on `localhost` port 0. Port 0 isn't a valid UNIX port, but it'll work for testing. -The operating system will return a connection on any open TCP port. +The operating system will pick an open TCP port for us. Instead, in this example we'll write a unit test for the connection handler, to check that the correct responses are returned for the respective inputs.
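Because the patch refers to the mock only through `{{#include}}` anchors, a condensed sketch of roughly what such a `MockTcpStream` could look like is given below; the field names and the single-buffer `poll_write` are illustrative assumptions rather than the exact contents of `examples/08_05_final_tcp_server/src/main.rs`.

```rust
use async_std::io::{Read, Write};
use std::cmp::min;
use std::io::Error;
use std::pin::Pin;
use std::task::{Context, Poll};

// Reads are served from `read_data`; writes are captured in `write_data`
// so a test can assert on the response afterwards.
struct MockTcpStream {
    read_data: Vec<u8>,
    write_data: Vec<u8>,
}

impl Read for MockTcpStream {
    fn poll_read(
        self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<Result<usize, Error>> {
        // Copy as much of the canned request as fits and report completion.
        let size = min(self.read_data.len(), buf.len());
        buf[..size].copy_from_slice(&self.read_data[..size]);
        Poll::Ready(Ok(size))
    }
}

impl Write for MockTcpStream {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, Error>> {
        // Record whatever the handler writes; a real stream would send it.
        self.write_data = Vec::from(buf);
        Poll::Ready(Ok(buf.len()))
    }

    fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Error>> {
        Poll::Ready(Ok(()))
    }

    fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Error>> {
        Poll::Ready(Ok(()))
    }
}

// The mock owns only `Vec`s, so it is safe to mark it `Unpin`.
impl Unpin for MockTcpStream {}
```

With a mock along these lines, the chapter's test can build a stream from a canned request, run `handle_connection` on it under `#[async_std::test]`, and assert on what ended up in `write_data`.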