From c9ba1b53069181cff921bbf37bef9e8d864ffc94 Mon Sep 17 00:00:00 2001 From: afinch7 Date: Thu, 14 Nov 2019 14:30:41 -0500 Subject: [PATCH 1/9] use new std future api --- .rustfmt.toml | 1 + Cargo.lock | 121 ++++++++++++- cli/Cargo.toml | 2 +- cli/compilers/js.rs | 6 +- cli/compilers/json.rs | 8 +- cli/compilers/mod.rs | 2 +- cli/compilers/ts.rs | 112 ++++++------ cli/compilers/wasm.rs | 14 +- cli/file_fetcher.rs | 72 ++++---- cli/global_state.rs | 25 ++- cli/global_timer.rs | 12 +- cli/http_body.rs | 80 +++++---- cli/http_util.rs | 58 +++--- cli/lib.rs | 189 ++++++++++---------- cli/ops/compiler.rs | 19 +- cli/ops/dispatch_json.rs | 47 ++--- cli/ops/dispatch_minimal.rs | 40 ++--- cli/ops/fetch.rs | 45 +++-- cli/ops/files.rs | 52 +++--- cli/ops/io.rs | 195 +++++++++++++------- cli/ops/net.rs | 136 ++++++++------ cli/ops/process.rs | 54 +++--- cli/ops/timers.rs | 4 +- cli/ops/tls.rs | 170 ++++++++++-------- cli/ops/workers.rs | 87 +++++---- cli/resolve_addr.rs | 34 ++-- cli/state.rs | 24 +-- cli/tests/integration_tests.rs | 2 + cli/tokio_util.rs | 53 ++---- cli/worker.rs | 125 +++++++------ core/Cargo.toml | 2 +- core/examples/http_bench.rs | 140 +++++++++------ core/isolate.rs | 314 ++++++++++++++++++++------------- core/modules.rs | 198 +++++++++++---------- core/ops.rs | 6 +- 35 files changed, 1444 insertions(+), 1005 deletions(-) diff --git a/.rustfmt.toml b/.rustfmt.toml index faa3d5a33a3d8c..ea036b878a9537 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -1,3 +1,4 @@ # Copyright 2018-2019 the Deno authors. All rights reserved. MIT license. max_width = 80 tab_spaces = 2 +edition = "2018" \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index dcb3c7ab559cb4..b60700feae47fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -272,7 +272,7 @@ name = "deno" version = "0.23.0" dependencies = [ "downcast-rs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -293,7 +293,7 @@ dependencies = [ "deno 0.23.0", "deno_typescript 0.23.0", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "fwdansi 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "http 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", @@ -452,6 +452,34 @@ name = "futures" version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "futures" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures-channel 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-executor 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-io 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-task 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 
0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "futures-channel" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "futures-core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "futures-cpupool" version = "0.1.8" @@ -461,6 +489,63 @@ dependencies = [ "num_cpus 1.11.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "futures-executor" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-task 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.11.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "futures-io" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "futures-macro" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "futures-sink" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "futures-task" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "futures-util" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-channel 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-io 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-macro 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-task 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "fwdansi" version = "1.0.1" @@ -862,11 +947,31 @@ name = "percent-encoding" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "pin-utils" +version = "0.1.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "ppv-lite86" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
+[[package]] +name = "proc-macro-hack" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro-nested" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "proc-macro2" version = "1.0.6" @@ -1986,7 +2091,16 @@ dependencies = [ "checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" "checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" "checksum futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" +"checksum futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6f16056ecbb57525ff698bb955162d0cd03bee84e6241c27ff75c08d8ca5987" +"checksum futures-channel 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fcae98ca17d102fd8a3603727b9259fcf7fa4239b603d2142926189bc8999b86" +"checksum futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "79564c427afefab1dfb3298535b21eda083ef7935b4f0ecbfcb121f0aec10866" "checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" +"checksum futures-executor 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1e274736563f686a837a0568b478bdabfeaec2dca794b5649b04e2fe1627c231" +"checksum futures-io 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e676577d229e70952ab25f3945795ba5b16d63ca794ca9d2c860e5595d20b5ff" +"checksum futures-macro 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "52e7c56c15537adb4f76d0b7a76ad131cb4d2f4f32d3b0bcabcbe1c7c5e87764" +"checksum futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "171be33efae63c2d59e6dbba34186fe0d6394fb378069a76dfd80fdcffd43c16" +"checksum futures-task 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0bae52d6b29cf440e298856fec3965ee6fa71b06aa7495178615953fd669e5f9" +"checksum futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0d66274fb76985d3c62c886d1da7ac4c0903a8c9f754e8fe0f35a6a6cc39e76" "checksum fwdansi 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "34dd4c507af68d37ffef962063dfa1944ce0dd4d5b82043dbab1dabe088610c3" "checksum getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e7db7ca94ed4cd01190ceee0d8a8052f08a247aa1b469a7f68c6a3b71afcf407" "checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" @@ -2029,7 +2143,10 @@ dependencies = [ "checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" "checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +"checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" "checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" +"checksum proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)" = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" +"checksum proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e" "checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" "checksum publicsuffix 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9bf259a81de2b2eb9850ec990ec78e6a25319715584fd7652b9b26f96fcb1510" "checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 4e2ab18c83a586..cb6f90add1f799 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -31,7 +31,7 @@ base64 = "0.11.0" byteorder = "1.3.2" clap = "2.33.0" dirs = "2.0.2" -futures = "0.1.29" +futures = { version = "0.3", features = [ "compat", "io-compat" ] } http = "0.1.19" hyper = "0.12.35" hyper-rustls = "0.17.1" diff --git a/cli/compilers/js.rs b/cli/compilers/js.rs index af79690d649807..b6277659f012e1 100644 --- a/cli/compilers/js.rs +++ b/cli/compilers/js.rs @@ -2,6 +2,8 @@ use crate::compilers::CompiledModule; use crate::compilers::CompiledModuleFuture; use crate::file_fetcher::SourceFile; +use crate::futures::future::FutureExt; +use std::pin::Pin; use std::str; pub struct JsCompiler {} @@ -10,7 +12,7 @@ impl JsCompiler { pub fn compile_async( self: &Self, source_file: &SourceFile, - ) -> Box { + ) -> Pin> { let module = CompiledModule { code: str::from_utf8(&source_file.source_code) .unwrap() @@ -18,6 +20,6 @@ impl JsCompiler { name: source_file.url.to_string(), }; - Box::new(futures::future::ok(module)) + futures::future::ok(module).boxed() } } diff --git a/cli/compilers/json.rs b/cli/compilers/json.rs index 22a1d5f3d10230..c1f63e37ba7304 100644 --- a/cli/compilers/json.rs +++ b/cli/compilers/json.rs @@ -2,8 +2,10 @@ use crate::compilers::CompiledModule; use crate::compilers::CompiledModuleFuture; use crate::file_fetcher::SourceFile; +use crate::futures::future::FutureExt; use deno::ErrBox; use regex::Regex; +use std::pin::Pin; use std::str; // From https://github.com/mathiasbynens/mothereff.in/blob/master/js-variables/eff.js @@ -15,11 +17,11 @@ impl JsonCompiler { pub fn compile_async( self: &Self, source_file: &SourceFile, - ) -> Box { + ) -> Pin> { let maybe_json_value: serde_json::Result = serde_json::from_str(&str::from_utf8(&source_file.source_code).unwrap()); if let Err(err) = maybe_json_value { - return Box::new(futures::future::err(ErrBox::from(err))); + return futures::future::err(ErrBox::from(err)).boxed(); } let mut code = format!( @@ -50,6 +52,6 @@ impl JsonCompiler { name: source_file.url.to_string(), }; - Box::new(futures::future::ok(module)) + futures::future::ok(module).boxed() } } diff --git a/cli/compilers/mod.rs b/cli/compilers/mod.rs index dca5bc7b6e4cd4..4f32f0b3f55c18 100644 --- a/cli/compilers/mod.rs +++ b/cli/compilers/mod.rs @@ -19,4 +19,4 @@ pub struct CompiledModule { } 
pub type CompiledModuleFuture = - dyn Future + Send; + dyn Future> + Send; diff --git a/cli/compilers/ts.rs b/cli/compilers/ts.rs index cac38265970390..34bf74ab1da8d1 100644 --- a/cli/compilers/ts.rs +++ b/cli/compilers/ts.rs @@ -15,13 +15,15 @@ use crate::worker::Worker; use deno::Buf; use deno::ErrBox; use deno::ModuleSpecifier; +use futures::future::FutureExt; +use futures::future::TryFutureExt; use futures::Future; -use futures::IntoFuture; use regex::Regex; use std::collections::HashSet; use std::fs; use std::io; use std::path::PathBuf; +use std::pin::Pin; use std::str; use std::sync::atomic::Ordering; use std::sync::Mutex; @@ -254,7 +256,7 @@ impl TsCompiler { global_state: ThreadSafeGlobalState, module_name: String, out_file: Option, - ) -> impl Future { + ) -> impl Future> { debug!( "Invoking the compiler to bundle. module_name: {}", module_name @@ -270,19 +272,17 @@ impl TsCompiler { let worker = TsCompiler::setup_worker(global_state.clone()); let worker_ = worker.clone(); - let first_msg_fut = worker - .post_message(req_msg) - .into_future() - .then(move |_| worker) - .then(move |result| { - if let Err(err) = result { - // TODO(ry) Need to forward the error instead of exiting. - eprintln!("{}", err.to_string()); - std::process::exit(1); - } - debug!("Sent message to worker"); - worker_.get_message() - }); + let first_msg_fut = async move { + worker.post_message(req_msg).await.unwrap(); + let result = worker.await; + if let Err(err) = result { + // TODO(ry) Need to forward the error instead of exiting. + eprintln!("{}", err.to_string()); + std::process::exit(1); + } + debug!("Sent message to worker"); + worker_.get_message().await + }; first_msg_fut.map_err(|_| panic!("not handled")).and_then( move |maybe_msg: Option| { @@ -292,11 +292,11 @@ impl TsCompiler { let json_str = std::str::from_utf8(&msg).unwrap(); debug!("Message: {}", json_str); if let Some(diagnostics) = Diagnostic::from_emit_result(json_str) { - return Err(ErrBox::from(diagnostics)); + return futures::future::err(ErrBox::from(diagnostics)); } } - Ok(()) + futures::future::ok(()) }, ) } @@ -325,11 +325,11 @@ impl TsCompiler { self: &Self, global_state: ThreadSafeGlobalState, source_file: &SourceFile, - ) -> Box { + ) -> Pin> { if self.has_compiled(&source_file.url) { return match self.get_compiled_module(&source_file.url) { - Ok(compiled) => Box::new(futures::future::ok(compiled)), - Err(err) => Box::new(futures::future::err(err)), + Ok(compiled) => futures::future::ok(compiled).boxed(), + Err(err) => futures::future::err(err).boxed(), }; } @@ -351,7 +351,7 @@ impl TsCompiler { self.get_compiled_module(&source_file.url) { self.mark_compiled(&source_file.url); - return Box::new(futures::future::ok(compiled_module)); + return futures::future::ok(compiled_module).boxed(); } } } @@ -382,19 +382,17 @@ impl TsCompiler { .add("Compile", &module_url.to_string()); let global_state_ = global_state.clone(); - let first_msg_fut = worker - .post_message(req_msg) - .into_future() - .then(move |_| worker) - .then(move |result| { - if let Err(err) = result { - // TODO(ry) Need to forward the error instead of exiting. - eprintln!("{}", err.to_string()); - std::process::exit(1); - } - debug!("Sent message to worker"); - worker_.get_message() - }); + let first_msg_fut = async move { + worker.post_message(req_msg).await.unwrap(); + let result = worker.await; + if let Err(err) = result { + // TODO(ry) Need to forward the error instead of exiting. 
+ eprintln!("{}", err.to_string()); + std::process::exit(1); + } + debug!("Sent message to worker"); + worker_.get_message().await + }; let fut = first_msg_fut .map_err(|_| panic!("not handled")) @@ -405,37 +403,42 @@ impl TsCompiler { let json_str = std::str::from_utf8(&msg).unwrap(); debug!("Message: {}", json_str); if let Some(diagnostics) = Diagnostic::from_emit_result(json_str) { - return Err(ErrBox::from(diagnostics)); + return futures::future::err(ErrBox::from(diagnostics)); } } - Ok(()) + futures::future::ok(()) }) .and_then(move |_| { // if we are this far it means compilation was successful and we can // load compiled filed from disk - global_state_ - .ts_compiler - .get_compiled_module(&source_file_.url) - .map_err(|e| { - // TODO: this situation shouldn't happen - panic!("Expected to find compiled file: {} {}", e, source_file_.url) - }) + futures::future::ready( + global_state_ + .ts_compiler + .get_compiled_module(&source_file_.url) + .map_err(|e| { + // TODO: this situation shouldn't happen + panic!( + "Expected to find compiled file: {} {}", + e, source_file_.url + ) + }), + ) }) .and_then(move |compiled_module| { // Explicit drop to keep reference alive until future completes. drop(compiling_job); - Ok(compiled_module) + futures::future::ok(compiled_module) }) .then(move |r| { debug!(">>>>> compile_sync END"); // TODO(ry) do this in worker's destructor. // resource.close(); - r + futures::future::ready(r) }); - Box::new(fut) + fut.boxed() } /// Get associated `CompiledFileMetadata` for given module if it exists. @@ -656,7 +659,6 @@ mod tests { use crate::fs as deno_fs; use crate::tokio_util; use deno::ModuleSpecifier; - use futures::future::lazy; use std::path::PathBuf; use tempfile::TempDir; @@ -682,7 +684,7 @@ mod tests { String::from("hello.js"), ]); - tokio_util::run(lazy(move || { + tokio_util::run( mock_state .ts_compiler .compile_async(mock_state.clone(), &out) @@ -693,9 +695,9 @@ mod tests { .code .as_bytes() .starts_with("console.log(\"Hello World\");".as_bytes())); - Ok(()) - }) - })) + futures::future::ok(()) + }), + ) } #[test] @@ -716,7 +718,7 @@ mod tests { String::from("$deno$/bundle.js"), ]); - tokio_util::run(lazy(move || { + tokio_util::run( state .ts_compiler .bundle_async( @@ -726,9 +728,9 @@ mod tests { ) .then(|result| { assert!(result.is_ok()); - Ok(()) - }) - })) + futures::future::ok(()) + }), + ) } #[test] diff --git a/cli/compilers/wasm.rs b/cli/compilers/wasm.rs index e0a715f84a19cf..30a171db44b1f4 100644 --- a/cli/compilers/wasm.rs +++ b/cli/compilers/wasm.rs @@ -7,11 +7,12 @@ use crate::startup_data; use crate::state::*; use crate::worker::Worker; use deno::Buf; -use futures::Future; -use futures::IntoFuture; +use futures::FutureExt; +use futures::TryFutureExt; use serde_derive::Deserialize; use serde_json; use std::collections::HashMap; +use std::pin::Pin; use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex}; use url::Url; @@ -71,11 +72,11 @@ impl WasmCompiler { self: &Self, global_state: ThreadSafeGlobalState, source_file: &SourceFile, - ) -> Box { + ) -> Pin> { let cache = self.cache.clone(); let maybe_cached = { cache.lock().unwrap().get(&source_file.url).cloned() }; if let Some(m) = maybe_cached { - return Box::new(futures::future::ok(m.clone())); + return futures::future::ok(m.clone()).boxed(); } let cache_ = self.cache.clone(); @@ -92,7 +93,6 @@ impl WasmCompiler { .into_boxed_str() .into_boxed_bytes(), ) - .into_future() .then(move |_| worker) .then(move |result| { if let Err(err) = result { @@ -124,9 +124,9 @@ impl WasmCompiler 
{ cache_.lock().unwrap().insert(url.clone(), module.clone()); } debug!("<<<<< wasm_compile_async END"); - Ok(module) + futures::future::ok(module) }); - Box::new(fut) + fut.boxed() } } diff --git a/cli/file_fetcher.rs b/cli/file_fetcher.rs index d28ed0e26e2f1f..93f8fdefed6b00 100644 --- a/cli/file_fetcher.rs +++ b/cli/file_fetcher.rs @@ -12,13 +12,16 @@ use crate::tokio_util; use deno::ErrBox; use deno::ModuleSpecifier; use futures::future::Either; -use futures::Future; +use futures::future::FutureExt; +use futures::future::TryFutureExt; use serde_json; use std; use std::collections::HashMap; use std::fs; +use std::future::Future; use std::path::Path; use std::path::PathBuf; +use std::pin::Pin; use std::result::Result; use std::str; use std::sync::Arc; @@ -39,7 +42,7 @@ pub struct SourceFile { } pub type SourceFileFuture = - dyn Future + Send; + dyn Future> + Send; /// Simple struct implementing in-process caching to prevent multiple /// fs reads/net fetches for same file. @@ -119,14 +122,14 @@ impl SourceFileFetcher { pub fn fetch_source_file_async( self: &Self, specifier: &ModuleSpecifier, - ) -> Box { + ) -> Pin> { let module_url = specifier.as_url().to_owned(); debug!("fetch_source_file. specifier {} ", &module_url); // Check if this file was already fetched and can be retrieved from in-process cache. if let Some(source_file) = self.source_file_cache.get(specifier.to_string()) { - return Box::new(futures::future::ok(source_file)); + return futures::future::ok(source_file).boxed(); } let source_file_cache = self.source_file_cache.clone(); @@ -139,7 +142,7 @@ impl SourceFileFetcher { self.no_remote_fetch, ) .then(move |result| { - let mut out = result.map_err(|err| { + let mut out = match result.map_err(|err| { if err.kind() == ErrorKind::NotFound { // For NotFound, change the message to something better. DenoError::new( @@ -150,7 +153,10 @@ impl SourceFileFetcher { } else { err } - })?; + }) { + Ok(v) => v, + Err(e) => return futures::future::err(e), + }; // TODO: move somewhere? if out.source_code.starts_with(b"#!") { @@ -160,10 +166,10 @@ impl SourceFileFetcher { // Cache in-process for subsequent access. source_file_cache.set(specifier_.to_string(), out.clone()); - Ok(out) + futures::future::ok(out) }); - Box::new(fut) + fut.boxed() } /// This is main method that is responsible for fetching local or remote files. @@ -180,29 +186,29 @@ impl SourceFileFetcher { module_url: &Url, use_disk_cache: bool, no_remote_fetch: bool, - ) -> impl Future { + ) -> impl Future> { let url_scheme = module_url.scheme(); let is_local_file = url_scheme == "file"; if let Err(err) = SourceFileFetcher::check_if_supported_scheme(&module_url) { - return Either::A(futures::future::err(err)); + return Either::Left(futures::future::err(err)); } // Local files are always fetched from disk bypassing cache entirely. 
if is_local_file { match self.fetch_local_file(&module_url) { Ok(source_file) => { - return Either::A(futures::future::ok(source_file)); + return Either::Left(futures::future::ok(source_file)); } Err(err) => { - return Either::A(futures::future::err(err)); + return Either::Left(futures::future::err(err)); } } } // Fetch remote file and cache on-disk for subsequent access - Either::B(self.fetch_remote_source_async( + Either::Right(self.fetch_remote_source_async( &module_url, use_disk_cache, no_remote_fetch, @@ -306,9 +312,9 @@ impl SourceFileFetcher { use_disk_cache: bool, no_remote_fetch: bool, redirect_limit: i64, - ) -> Box { + ) -> Pin> { if redirect_limit < 0 { - return Box::new(futures::future::err(too_many_redirects())); + return futures::future::err(too_many_redirects()).boxed(); } let is_blacklisted = @@ -317,13 +323,13 @@ impl SourceFileFetcher { if use_disk_cache && !is_blacklisted { match self.fetch_cached_remote_source(&module_url) { Ok(Some(source_file)) => { - return Box::new(futures::future::ok(source_file)); + return futures::future::ok(source_file).boxed(); } Ok(None) => { // there's no cached version } Err(err) => { - return Box::new(futures::future::err(err)); + return futures::future::err(err).boxed(); } } } @@ -331,7 +337,7 @@ impl SourceFileFetcher { // If file wasn't found in cache check if we can fetch it if no_remote_fetch { // We can't fetch remote file - bail out - return Box::new(futures::future::err( + return futures::future::err( std::io::Error::new( std::io::ErrorKind::NotFound, format!( @@ -340,7 +346,8 @@ impl SourceFileFetcher { ), ) .into(), - )); + ) + .boxed(); } let download_job = self.progress.add("Download", &module_url.to_string()); @@ -364,7 +371,7 @@ impl SourceFileFetcher { drop(download_job); // Recurse - Either::A(dir.fetch_remote_source_async( + Either::Left(dir.fetch_remote_source_async( &new_module_url, use_disk_cache, no_remote_fetch, @@ -403,12 +410,12 @@ impl SourceFileFetcher { // Explicit drop to keep reference alive until future completes. drop(download_job); - Either::B(futures::future::ok(source_file)) + Either::Right(futures::future::ok(source_file)) } } }); - Box::new(f) + f.boxed() } /// Get header metadata associated with a remote file. @@ -890,7 +897,7 @@ mod tests { // Now the old .headers.json file should have gone! Resolved back to TypeScript assert_eq!(&(r4.media_type), &msg::MediaType::TypeScript); assert!(fs::read_to_string(&headers_file_name_3).is_err()); - Ok(()) + futures::future::ok(()) }); // http_util::fetch_sync_string requires tokio @@ -968,7 +975,7 @@ mod tests { .unwrap(), "text/javascript" ); - Ok(()) + futures::future::ok(()) }); tokio_util::run(fut); @@ -1071,7 +1078,7 @@ mod tests { // Examine the meta result. assert_eq!(mod_meta.url.clone(), target_module_url); - Ok(()) + futures::future::ok(()) }); tokio_util::run(fut); @@ -1138,7 +1145,7 @@ mod tests { // Examine the meta result. 
assert_eq!(mod_meta.url.clone(), target_url); - Ok(()) + futures::future::ok(()) }); tokio_util::run(fut); @@ -1183,9 +1190,8 @@ mod tests { .get_source_file_async(&redirect_url, true, false) .map(move |r| (r, file_modified)) }) - .then(move |result| { + .then(move |(result, file_modified)| { assert!(result.is_ok()); - let (_, file_modified) = result.unwrap(); let result = fs::File::open(&target_path_); assert!(result.is_ok()); let file_2 = result.unwrap(); @@ -1194,7 +1200,7 @@ mod tests { let file_modified_2 = file_metadata_2.modified().unwrap(); assert_eq!(file_modified, file_modified_2); - Ok(()) + futures::future::ok(()) }); tokio_util::run(fut); @@ -1220,7 +1226,7 @@ mod tests { assert!(result.is_err()); let err = result.err().unwrap(); assert_eq!(err.kind(), ErrorKind::TooManyRedirects); - Ok(()) + futures::future::ok(()) }); tokio_util::run(fut); @@ -1255,7 +1261,7 @@ mod tests { }) .then(move |result| { assert!(result.is_ok()); - Ok(()) + futures::future::ok(()) }); tokio_util::run(fut); @@ -1296,7 +1302,7 @@ mod tests { assert_eq!(r2.source_code, b"export const loaded = true;\n"); // Not MediaType::TypeScript due to .headers.json modification assert_eq!(&(r2.media_type), &msg::MediaType::JavaScript); - Ok(()) + futures::future::ok(()) }); tokio_util::run(fut); @@ -1339,7 +1345,7 @@ mod tests { assert_eq!(r2.source_code, "export const loaded = true;\n".as_bytes()); // Not MediaType::TypeScript due to .headers.json modification assert_eq!(&(r2.media_type), &msg::MediaType::JavaScript); - Ok(()) + futures::future::ok(()) }); tokio_util::run(fut); diff --git a/cli/global_state.rs b/cli/global_state.rs index b0c2821707c9b9..38aff2093dac69 100644 --- a/cli/global_state.rs +++ b/cli/global_state.rs @@ -15,9 +15,10 @@ use crate::permissions::DenoPermissions; use crate::progress::Progress; use deno::ErrBox; use deno::ModuleSpecifier; -use futures::Future; +use futures::future::TryFutureExt; use std; use std::env; +use std::future::Future; use std::ops::Deref; use std::str; use std::sync::Arc; @@ -123,7 +124,7 @@ impl ThreadSafeGlobalState { pub fn fetch_compiled_module( self: &Self, module_specifier: &ModuleSpecifier, - ) -> impl Future { + ) -> impl Future> { let state1 = self.clone(); let state2 = self.clone(); @@ -154,15 +155,21 @@ impl ThreadSafeGlobalState { let mut g = lockfile.lock().unwrap(); if state2.flags.lock_write { g.insert(&compiled_module); - } else if !g.check(&compiled_module)? { - eprintln!( - "Subresource integrety check failed --lock={}\n{}", - g.filename, compiled_module.name - ); - std::process::exit(10); + } else { + let check = match g.check(&compiled_module) { + Err(e) => return futures::future::err(ErrBox::from(e)), + Ok(v) => v, + }; + if !check { + eprintln!( + "Subresource integrety check failed --lock={}\n{}", + g.filename, compiled_module.name + ); + std::process::exit(10); + } } } - Ok(compiled_module) + futures::future::ok(compiled_module) }) } diff --git a/cli/global_timer.rs b/cli/global_timer.rs index d3ca52f4604300..9ab760ab4a7bb9 100644 --- a/cli/global_timer.rs +++ b/cli/global_timer.rs @@ -9,9 +9,10 @@ //! calls it) for an entire Isolate. This is what is implemented here. 
use crate::tokio_util::panic_on_error; -use futures::Future; +use futures::channel::oneshot; +use futures::future::FutureExt; +use std::future::Future; use std::time::Instant; -use tokio::sync::oneshot; use tokio::timer::Delay; #[derive(Default)] @@ -33,7 +34,7 @@ impl GlobalTimer { pub fn new_timeout( &mut self, deadline: Instant, - ) -> impl Future { + ) -> impl Future> { if self.tx.is_some() { self.cancel(); } @@ -42,9 +43,10 @@ impl GlobalTimer { let (tx, rx) = oneshot::channel(); self.tx = Some(tx); - let delay = panic_on_error(Delay::new(deadline)); + let delay = + panic_on_error(futures::compat::Compat01As03::new(Delay::new(deadline))); let rx = panic_on_error(rx); - delay.select(rx).then(|_| Ok(())) + futures::future::select(delay, rx).then(|_| futures::future::ok(())) } } diff --git a/cli/http_body.rs b/cli/http_body.rs index c03dfd637b2573..72ec8017e818e9 100644 --- a/cli/http_body.rs +++ b/cli/http_body.rs @@ -1,19 +1,20 @@ // Copyright 2018-2019 the Deno authors. All rights reserved. MIT license. -use futures::stream::Stream; -use futures::Async; -use futures::Poll; +use futures::io::AsyncRead; +use futures::stream::StreamExt; use reqwest::r#async::Chunk; use reqwest::r#async::Decoder; use std::cmp::min; use std::io; use std::io::Read; -use tokio::io::AsyncRead; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; /// Wraps `reqwest::Decoder` so that it can be exposed as an `AsyncRead` and integrated /// into resources more easily. pub struct HttpBody { - decoder: Decoder, + decoder: futures::compat::Compat01As03, chunk: Option, pos: usize, } @@ -21,7 +22,7 @@ pub struct HttpBody { impl HttpBody { pub fn from(body: Decoder) -> Self { Self { - decoder: body, + decoder: futures::compat::Compat01As03::new(body), chunk: None, pos: 0, } @@ -35,55 +36,58 @@ impl Read for HttpBody { } impl AsyncRead for HttpBody { - fn poll_read(&mut self, buf: &mut [u8]) -> Poll { - if let Some(chunk) = self.chunk.take() { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context, + buf: &mut [u8], + ) -> Poll> { + let mut inner = self.get_mut(); + if let Some(chunk) = inner.chunk.take() { debug!( "HttpBody Fake Read buf {} chunk {} pos {}", buf.len(), chunk.len(), - self.pos + inner.pos ); - let n = min(buf.len(), chunk.len() - self.pos); + let n = min(buf.len(), chunk.len() - inner.pos); { - let rest = &chunk[self.pos..]; + let rest = &chunk[inner.pos..]; buf[..n].clone_from_slice(&rest[..n]); } - self.pos += n; - if self.pos == chunk.len() { - self.pos = 0; + inner.pos += n; + if inner.pos == chunk.len() { + inner.pos = 0; } else { - self.chunk = Some(chunk); + inner.chunk = Some(chunk); } - return Ok(Async::Ready(n)); + return Poll::Ready(Ok(n)); } else { - assert_eq!(self.pos, 0); + assert_eq!(inner.pos, 0); } - let p = self.decoder.poll(); + let p = inner.decoder.poll_next_unpin(cx); match p { - Err(e) => Err( + Poll::Ready(Some(Err(e))) => Poll::Ready(Err( // TODO Need to map hyper::Error into std::io::Error. 
io::Error::new(io::ErrorKind::Other, e), - ), - Ok(Async::NotReady) => Ok(Async::NotReady), - Ok(Async::Ready(maybe_chunk)) => match maybe_chunk { - None => Ok(Async::Ready(0)), - Some(chunk) => { - debug!( - "HttpBody Real Read buf {} chunk {} pos {}", - buf.len(), - chunk.len(), - self.pos - ); - let n = min(buf.len(), chunk.len()); - buf[..n].clone_from_slice(&chunk[..n]); - if buf.len() < chunk.len() { - self.pos = n; - self.chunk = Some(chunk); - } - Ok(Async::Ready(n)) + )), + Poll::Ready(Some(Ok(chunk))) => { + debug!( + "HttpBody Real Read buf {} chunk {} pos {}", + buf.len(), + chunk.len(), + inner.pos + ); + let n = min(buf.len(), chunk.len()); + buf[..n].clone_from_slice(&chunk[..n]); + if buf.len() < chunk.len() { + inner.pos = n; + inner.chunk = Some(chunk); } - }, + Poll::Ready(Ok(n)) + } + Poll::Ready(None) => Poll::Ready(Ok(0)), + Poll::Pending => Poll::Pending, } } } diff --git a/cli/http_util.rs b/cli/http_util.rs index d3bc164153b123..41a01f1e30a252 100644 --- a/cli/http_util.rs +++ b/cli/http_util.rs @@ -3,7 +3,9 @@ use crate::deno_error; use crate::deno_error::DenoError; use crate::version; use deno::ErrBox; -use futures::{future, Future}; +use futures::future; +use futures::future::FutureExt; +use futures::future::TryFutureExt; use reqwest; use reqwest::header::HeaderMap; use reqwest::header::CONTENT_TYPE; @@ -11,6 +13,8 @@ use reqwest::header::LOCATION; use reqwest::header::USER_AGENT; use reqwest::r#async::Client; use reqwest::RedirectPolicy; +use std::future::Future; +use std::pin::Pin; use url::Url; /// Create new instance of async reqwest::Client. This client supports @@ -70,22 +74,21 @@ pub enum FetchOnceResult { /// yields Redirect(url). pub fn fetch_string_once( url: &Url, -) -> impl Future { +) -> impl Future> { type FetchAttempt = (Option, Option, Option); let url = url.clone(); let client = get_client(); - client - .get(url.clone()) - .send() + futures::compat::Compat01As03::new(client.get(url.clone()).send()) .map_err(ErrBox::from) .and_then( - move |mut response| -> Box< - dyn Future + Send, + move |mut response| -> Pin< + Box> + Send>, > { if response.status().is_redirection() { - let location_string = response.headers() + let location_string = response + .headers() .get(LOCATION) .expect("url redirection should provide 'location' header") .to_str() @@ -94,17 +97,25 @@ pub fn fetch_string_once( debug!("Redirecting to {:?}...", &location_string); let new_url = resolve_url_from_location(&url, location_string); // Boxed trait object turns out to be the savior for 2+ types yielding same results. 
- return Box::new(future::ok(None).join3( + return futures::future::try_join3( + future::ok(None), future::ok(None), future::ok(Some(FetchOnceResult::Redirect(new_url))), - )); + ) + .boxed(); } - if response.status().is_client_error() || response.status().is_server_error() { - return Box::new(future::err(DenoError::new( - deno_error::ErrorKind::Other, - format!("Import '{}' failed: {}", &url, response.status()), - ).into())); + if response.status().is_client_error() + || response.status().is_server_error() + { + return future::err( + DenoError::new( + deno_error::ErrorKind::Other, + format!("Import '{}' failed: {}", &url, response.status()), + ) + .into(), + ) + .boxed(); } let content_type = response @@ -112,14 +123,17 @@ pub fn fetch_string_once( .get(CONTENT_TYPE) .map(|content_type| content_type.to_str().unwrap().to_owned()); - let body = response - .text() + let body = futures::compat::Compat01As03::new(response.text()) + .map_ok(Some) .map_err(ErrBox::from); - Box::new( - Some(body).join3(future::ok(content_type), future::ok(None)) + futures::future::try_join3( + body, + future::ok(content_type), + future::ok(None), ) - } + .boxed() + }, ) .and_then(move |(maybe_code, maybe_content_type, maybe_redirect)| { if let Some(redirect) = maybe_redirect { @@ -150,7 +164,7 @@ mod tests { Ok(FetchOnceResult::Code(code, maybe_content_type)) => { assert!(!code.is_empty()); assert_eq!(maybe_content_type, Some("application/json".to_string())); - Ok(()) + futures::future::ok(()) } _ => panic!(), }); @@ -171,7 +185,7 @@ mod tests { let fut = fetch_string_once(&url).then(move |result| match result { Ok(FetchOnceResult::Redirect(url)) => { assert_eq!(url, target_url); - Ok(()) + futures::future::ok(()) } _ => panic!(), }); diff --git a/cli/lib.rs b/cli/lib.rs index 3d772bb83f6bf5..22478928230bf6 100644 --- a/cli/lib.rs +++ b/cli/lib.rs @@ -3,7 +3,6 @@ extern crate lazy_static; #[macro_use] extern crate log; -#[macro_use] extern crate futures; #[macro_use] extern crate serde_json; @@ -65,12 +64,13 @@ use deno::ErrBox; use deno::ModuleSpecifier; use flags::DenoFlags; use flags::DenoSubcommand; -use futures::lazy; -use futures::Future; +use futures::future::FutureExt; +use futures::future::TryFutureExt; use log::Level; use log::Metadata; use log::Record; use std::env; +use std::future::Future; static LOGGER: Logger = Logger; @@ -175,7 +175,7 @@ fn print_cache_info(worker: Worker) { pub fn print_file_info( worker: Worker, module_specifier: &ModuleSpecifier, -) -> impl Future { +) -> impl Future> { let global_state_ = worker.state.global_state.clone(); let state_ = worker.state.clone(); let module_specifier_ = module_specifier.clone(); @@ -249,7 +249,7 @@ pub fn print_file_info( colors::bold("deps:".to_string()), ); } - Ok(worker) + futures::future::ok(worker) }) }) } @@ -263,22 +263,22 @@ fn info_command(flags: DenoFlags, argv: Vec) { } let main_module = state.main_module.as_ref().unwrap().clone(); - let main_future = lazy(move || { - // Setup runtime. - js_check(worker.execute("denoMain()")); - debug!("main_module {}", main_module); - worker - .execute_mod_async(&main_module, None, true) - .map_err(print_err_and_exit) - .and_then(move |()| print_file_info(worker, &main_module)) - .and_then(|worker| { - worker.then(|result| { - js_check(result); - Ok(()) - }) + // Setup runtime. 
+ js_check(worker.execute("denoMain()")); + debug!("main_module {}", main_module); + + let main_future = worker + .execute_mod_async(&main_module, None, true) + .map_err(print_err_and_exit) + .and_then(move |()| print_file_info(worker, &main_module)) + .and_then(|worker| { + worker.then(|result| { + js_check(result); + futures::future::ok(()) }) - }); + }); + tokio_util::run(main_future); } @@ -286,18 +286,19 @@ fn fetch_command(flags: DenoFlags, argv: Vec) { let (mut worker, state) = create_worker_and_state(flags, argv.clone()); let main_module = state.main_module.as_ref().unwrap().clone(); - let main_future = lazy(move || { - // Setup runtime. - js_check(worker.execute("denoMain()")); - debug!("main_module {}", main_module); + // Setup runtime. + js_check(worker.execute("denoMain()")); + debug!("main_module {}", main_module); + + let main_future = worker .execute_mod_async(&main_module, None, true) .then(|result| { js_check(result); - Ok(()) - }) - }); + futures::future::ok(()) + }); + tokio_util::run(main_future); } @@ -308,25 +309,23 @@ fn eval_command(flags: DenoFlags, argv: Vec) { let main_module = ModuleSpecifier::resolve_url_or_path("./__$deno$eval.ts").unwrap(); - let main_future = lazy(move || { - js_check(worker.execute("denoMain()")); - debug!("main_module {}", &main_module); + js_check(worker.execute("denoMain()")); + debug!("main_module {}", &main_module); - let mut worker_ = worker.clone(); - worker - .execute_mod_async(&main_module, Some(ts_source), false) - .and_then(move |()| { - js_check(worker.execute("window.dispatchEvent(new Event('load'))")); - worker.then(move |result| { - js_check(result); - js_check( - worker_.execute("window.dispatchEvent(new Event('unload'))"), - ); - Ok(()) - }) + let mut worker_ = worker.clone(); + + let main_future = worker + .execute_mod_async(&main_module, Some(ts_source), false) + .and_then(move |()| { + js_check(worker.execute("window.dispatchEvent(new Event('load'))")); + worker.then(move |result| { + js_check(result); + js_check(worker_.execute("window.dispatchEvent(new Event('unload'))")); + futures::future::ok(()) }) - .map_err(print_err_and_exit) - }); + }) + .map_err(print_err_and_exit); + tokio_util::run(main_future); } @@ -341,22 +340,20 @@ fn bundle_command(flags: DenoFlags, argv: Vec) { }; debug!(">>>>> bundle_async START"); // NOTE: we need to poll `worker` otherwise TS compiler worker won't run properly - let main_future = lazy(move || { - worker.then(move |result| { - js_check(result); - state - .ts_compiler - .bundle_async(state.clone(), main_module.to_string(), out_file) - .map_err(|err| { - debug!("diagnostics returned, exiting!"); - eprintln!(""); - print_err_and_exit(err); - }) - .and_then(move |_| { - debug!(">>>>> bundle_async END"); - Ok(()) - }) - }) + let main_future = worker.then(move |result| { + js_check(result); + state + .ts_compiler + .bundle_async(state.clone(), main_module.to_string(), out_file) + .map_err(|err| { + debug!("diagnostics returned, exiting!"); + eprintln!(""); + print_err_and_exit(err); + }) + .and_then(move |_| { + debug!(">>>>> bundle_async END"); + futures::future::ok(()) + }) }); tokio_util::run(main_future); } @@ -365,16 +362,15 @@ fn run_repl(flags: DenoFlags, argv: Vec) { let (mut worker, _state) = create_worker_and_state(flags, argv); // REPL situation. - let main_future = lazy(move || { - // Setup runtime. 
- js_check(worker.execute("denoMain()")); - worker - .then(|result| { - js_check(result); - Ok(()) - }) - .map_err(|(err, _worker): (ErrBox, Worker)| print_err_and_exit(err)) - }); + + // Setup runtime. + js_check(worker.execute("denoMain()")); + let main_future = worker + .then(|result| { + js_check(result); + futures::future::ok(()) + }) + .map_err(|(err, _worker): (ErrBox, Worker)| print_err_and_exit(err)); tokio_util::run(main_future); } @@ -384,39 +380,38 @@ fn run_script(flags: DenoFlags, argv: Vec) { let main_module = state.main_module.as_ref().unwrap().clone(); // Normal situation of executing a module. - let main_future = lazy(move || { - // Setup runtime. - js_check(worker.execute("denoMain()")); - debug!("main_module {}", main_module); - let mut worker_ = worker.clone(); + // Setup runtime. + js_check(worker.execute("denoMain()")); + debug!("main_module {}", main_module); - worker - .execute_mod_async(&main_module, None, false) - .and_then(move |()| { - if state.flags.lock_write { - if let Some(ref lockfile) = state.lockfile { - let g = lockfile.lock().unwrap(); - g.write()?; - } else { - eprintln!("--lock flag must be specified when using --lock-write"); - std::process::exit(11); + let mut worker_ = worker.clone(); + + let main_future = worker + .execute_mod_async(&main_module, None, false) + .and_then(move |()| { + if state.flags.lock_write { + if let Some(ref lockfile) = state.lockfile { + let g = lockfile.lock().unwrap(); + if let Err(e) = g.write() { + return futures::future::err(ErrBox::from(e)); } + } else { + eprintln!("--lock flag must be specified when using --lock-write"); + std::process::exit(11); } - Ok(()) - }) - .and_then(move |()| { - js_check(worker.execute("window.dispatchEvent(new Event('load'))")); - worker.then(move |result| { - js_check(result); - js_check( - worker_.execute("window.dispatchEvent(new Event('unload'))"), - ); - Ok(()) - }) + } + futures::future::ok(()) + }) + .and_then(move |()| { + js_check(worker.execute("window.dispatchEvent(new Event('load'))")); + worker.then(move |result| { + js_check(result); + js_check(worker_.execute("window.dispatchEvent(new Event('unload'))")); + futures::future::ok(()) }) - .map_err(print_err_and_exit) - }); + }) + .map_err(print_err_and_exit); if use_current_thread { tokio_util::run_on_current_thread(main_future); diff --git a/cli/ops/compiler.rs b/cli/ops/compiler.rs index a722db6af5f9fd..fdb62ca321c4dd 100644 --- a/cli/ops/compiler.rs +++ b/cli/ops/compiler.rs @@ -1,7 +1,8 @@ // Copyright 2018-2019 the Deno authors. All rights reserved. MIT license. use super::dispatch_json::{Deserialize, JsonOp, Value}; -use crate::futures::future::join_all; -use crate::futures::Future; +use crate::futures::future::try_join_all; +use crate::futures::future::FutureExt; +use crate::futures::future::TryFutureExt; use crate::msg; use crate::ops::json_op; use crate::state::ThreadSafeState; @@ -77,7 +78,7 @@ fn op_fetch_source_files( let global_state = state.global_state.clone(); - let future = join_all(futures) + let future = try_join_all(futures) .map_err(ErrBox::from) .and_then(move |files| { // We want to get an array of futures that resolves to @@ -88,17 +89,19 @@ fn op_fetch_source_files( // compile them into JS first! // This allows TS to do correct export types. 
if file.media_type == msg::MediaType::Wasm { - return futures::future::Either::A( + return futures::future::Either::Left( global_state .wasm_compiler .compile_async(global_state.clone(), &file) - .and_then(|compiled_mod| Ok((file, Some(compiled_mod.code)))), + .and_then(|compiled_mod| { + futures::future::ok((file, Some(compiled_mod.code))) + }), ); } - futures::future::Either::B(futures::future::ok((file, None))) + futures::future::Either::Right(futures::future::ok((file, None))) }) .collect(); - join_all(v) + try_join_all(v) }) .and_then(move |files_with_code| { let res = files_with_code @@ -120,7 +123,7 @@ fn op_fetch_source_files( futures::future::ok(res) }); - Ok(JsonOp::Async(Box::new(future))) + Ok(JsonOp::Async(future.boxed())) } #[derive(Deserialize)] diff --git a/cli/ops/dispatch_json.rs b/cli/ops/dispatch_json.rs index 5f1caac6f7acf1..38dc7932e0136c 100644 --- a/cli/ops/dispatch_json.rs +++ b/cli/ops/dispatch_json.rs @@ -1,13 +1,15 @@ // Copyright 2018-2019 the Deno authors. All rights reserved. MIT license. -use crate::tokio_util; use deno::*; -use futures::Future; -use futures::Poll; +use futures::future::FutureExt; +use futures::task::SpawnExt; pub use serde_derive::Deserialize; use serde_json::json; pub use serde_json::Value; +use std::future::Future; +use std::pin::Pin; -pub type AsyncJsonOp = Box + Send>; +pub type AsyncJsonOp = + Pin> + Send>>; pub enum JsonOp { Sync(Value), @@ -70,48 +72,35 @@ where } Ok(JsonOp::Async(fut)) => { assert!(promise_id.is_some()); - let fut2 = Box::new(fut.then(move |result| -> Result { - Ok(serialize_result(promise_id, result)) - })); - CoreOp::Async(fut2) + let fut2 = fut.then(move |result| { + futures::future::ok(serialize_result(promise_id, result)) + }); + CoreOp::Async(fut2.boxed()) } Err(sync_err) => { let buf = serialize_result(promise_id, Err(sync_err)); if is_sync { CoreOp::Sync(buf) } else { - CoreOp::Async(Box::new(futures::future::ok(buf))) + CoreOp::Async(futures::future::ok(buf).boxed()) } } } } } -// This is just type conversion. Implement From trait? -// See https://github.com/tokio-rs/tokio/blob/ffd73a64e7ec497622b7f939e38017afe7124dc4/tokio-fs/src/lib.rs#L76-L85 -fn convert_blocking_json(f: F) -> Poll -where - F: FnOnce() -> Result, -{ - use futures::Async::*; - match tokio_threadpool::blocking(f) { - Ok(Ready(Ok(v))) => Ok(Ready(v)), - Ok(Ready(Err(err))) => Err(err), - Ok(NotReady) => Ok(NotReady), - Err(err) => panic!("blocking error {}", err), - } -} - pub fn blocking_json(is_sync: bool, f: F) -> Result where - F: 'static + Send + FnOnce() -> Result, + F: 'static + Send + FnOnce() -> Result + Unpin, { if is_sync { Ok(JsonOp::Sync(f()?)) } else { - Ok(JsonOp::Async(Box::new(futures::sync::oneshot::spawn( - tokio_util::poll_fn(move || convert_blocking_json(f)), - &tokio_executor::DefaultExecutor::current(), - )))) + //TODO(afinch7) replace this with something more efficent. 
+ let pool = futures::executor::ThreadPool::new().unwrap(); + let handle = pool + .spawn_with_handle(futures::future::lazy(move |_cx| f())) + .unwrap(); + Ok(JsonOp::Async(handle.boxed())) } } diff --git a/cli/ops/dispatch_minimal.rs b/cli/ops/dispatch_minimal.rs index 355a2463406bd3..13738ba56e6f63 100644 --- a/cli/ops/dispatch_minimal.rs +++ b/cli/ops/dispatch_minimal.rs @@ -12,9 +12,11 @@ use deno::CoreOp; use deno::ErrBox; use deno::Op; use deno::PinnedBuf; -use futures::Future; +use futures::future::FutureExt; +use std::future::Future; +use std::pin::Pin; -pub type MinimalOp = dyn Future + Send; +pub type MinimalOp = dyn Future> + Send; #[derive(Copy, Clone, Debug, PartialEq)] // This corresponds to RecordMinimal on the TS side. @@ -113,7 +115,7 @@ fn test_parse_min_record() { pub fn minimal_op(d: D) -> impl Fn(&[u8], Option) -> CoreOp where - D: Fn(i32, Option) -> Box, + D: Fn(i32, Option) -> Pin>, { move |control: &[u8], zero_copy: Option| { let mut record = match parse_min_record(control) { @@ -136,21 +138,19 @@ where let min_op = d(rid, zero_copy); // Convert to CoreOp - let fut = Box::new(min_op.then(move |result| -> Result { - match result { - Ok(r) => { - record.result = r; - Ok(record.into()) - } - Err(err) => { - let error_record = ErrorRecord { - promise_id: record.promise_id, - arg: -1, - error_code: err.kind() as i32, - error_message: err.to_string().as_bytes().to_owned(), - }; - Ok(error_record.into()) - } + let fut = Box::new(min_op.then(move |result| match result { + Ok(r) => { + record.result = r; + futures::future::ok(record.into()) + } + Err(err) => { + let error_record = ErrorRecord { + promise_id: record.promise_id, + arg: -1, + error_code: err.kind() as i32, + error_message: err.to_string().as_bytes().to_owned(), + }; + futures::future::ok(error_record.into()) } })); @@ -160,9 +160,9 @@ where // tokio_util::block_on. // This block is only exercised for readSync and writeSync, which I think // works since they're simple polling futures. 
- Op::Sync(fut.wait().unwrap()) + Op::Sync(futures::executor::block_on(fut).unwrap()) } else { - Op::Async(fut) + Op::Async(fut.boxed()) } } } diff --git a/cli/ops/fetch.rs b/cli/ops/fetch.rs index a1c0fe29cb139b..168d6dafce0c70 100644 --- a/cli/ops/fetch.rs +++ b/cli/ops/fetch.rs @@ -6,11 +6,12 @@ use crate::http_util::get_client; use crate::ops::json_op; use crate::state::ThreadSafeState; use deno::*; +use futures::future::FutureExt; +use futures::future::TryFutureExt; +use futures::task::SpawnExt; use http::header::HeaderName; use http::header::HeaderValue; use http::Method; -use hyper; -use hyper::rt::Future; use std; use std::convert::From; @@ -56,26 +57,32 @@ pub fn op_fetch( } debug!("Before fetch {}", url); let state_ = state.clone(); - let future = request.send().map_err(ErrBox::from).and_then(move |res| { - let status = res.status(); - let mut res_headers = Vec::new(); - for (key, val) in res.headers().iter() { - res_headers.push((key.to_string(), val.to_str().unwrap().to_owned())); - } + let future = futures::compat::Compat01As03::new(request.send()) + .map_err(ErrBox::from) + .and_then(move |res| { + debug!("Fetch response {}", url); + let status = res.status(); + let mut res_headers = Vec::new(); + for (key, val) in res.headers().iter() { + res_headers.push((key.to_string(), val.to_str().unwrap().to_owned())); + } - let body = HttpBody::from(res.into_body()); - let mut table = state_.lock_resource_table(); - let rid = table.add("httpBody", Box::new(StreamResource::HttpBody(body))); + let body = HttpBody::from(res.into_body()); + let mut table = state_.lock_resource_table(); + let rid = table.add("httpBody", Box::new(StreamResource::HttpBody(body))); - let json_res = json!({ - "bodyRid": rid, - "status": status.as_u16(), - "statusText": status.canonical_reason().unwrap_or(""), - "headers": res_headers + let json_res = json!({ + "bodyRid": rid, + "status": status.as_u16(), + "statusText": status.canonical_reason().unwrap_or(""), + "headers": res_headers + }); + + futures::future::ok(json_res) }); - futures::future::ok(json_res) - }); + let pool = futures::executor::ThreadPool::new().unwrap(); + let handle = pool.spawn_with_handle(future).unwrap(); - Ok(JsonOp::Async(Box::new(future))) + Ok(JsonOp::Async(handle.boxed())) } diff --git a/cli/ops/files.rs b/cli/ops/files.rs index fc1b8e7d8cfe00..1c041b38d99559 100644 --- a/cli/ops/files.rs +++ b/cli/ops/files.rs @@ -8,11 +8,15 @@ use crate::fs as deno_fs; use crate::ops::json_op; use crate::state::ThreadSafeState; use deno::*; -use futures::Future; -use futures::Poll; +use futures::future::FutureExt; +use futures::future::TryFutureExt; use std; use std::convert::From; +use std::future::Future; use std::io::SeekFrom; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; use tokio; pub fn init(i: &mut Isolate, s: &ThreadSafeState) { @@ -88,19 +92,21 @@ fn op_open( } let is_sync = args.promise_id.is_none(); - let op = open_options.open(filename).map_err(ErrBox::from).and_then( - move |fs_file| { - let mut table = state_.lock_resource_table(); - let rid = table.add("fsFile", Box::new(StreamResource::FsFile(fs_file))); - futures::future::ok(json!(rid)) - }, - ); + let op = futures::compat::Compat01As03::new(tokio::prelude::Future::map_err( + open_options.open(filename), + ErrBox::from, + )) + .and_then(move |fs_file| { + let mut table = state_.lock_resource_table(); + let rid = table.add("fsFile", Box::new(StreamResource::FsFile(fs_file))); + futures::future::ok(json!(rid)) + }); if is_sync { - let buf = op.wait()?; + let 
buf = futures::executor::block_on(op)?; Ok(JsonOp::Sync(buf)) } else { - Ok(JsonOp::Async(Box::new(op))) + Ok(JsonOp::Async(op.boxed())) } } @@ -128,21 +134,27 @@ pub struct SeekFuture { } impl Future for SeekFuture { - type Item = u64; - type Error = ErrBox; + type Output = Result; - fn poll(&mut self) -> Poll { - let mut table = self.state.lock_resource_table(); + fn poll(self: Pin<&mut Self>, _cx: &mut Context) -> Poll { + let inner = self.get_mut(); + let mut table = inner.state.lock_resource_table(); let resource = table - .get_mut::(self.rid) + .get_mut::(inner.rid) .ok_or_else(bad_resource)?; let tokio_file = match resource { StreamResource::FsFile(ref mut file) => file, - _ => return Err(bad_resource()), + _ => return Poll::Ready(Err(bad_resource())), }; - tokio_file.poll_seek(self.seek_from).map_err(ErrBox::from) + use tokio::prelude::Async::*; + + match tokio_file.poll_seek(inner.seek_from).map_err(ErrBox::from) { + Ok(Ready(v)) => Poll::Ready(Ok(v)), + Err(err) => Poll::Ready(Err(err)), + Ok(NotReady) => Poll::Pending, + } } } @@ -185,9 +197,9 @@ fn op_seek( let op = fut.and_then(move |_| futures::future::ok(json!({}))); if args.promise_id.is_none() { - let buf = op.wait()?; + let buf = futures::executor::block_on(op)?; Ok(JsonOp::Sync(buf)) } else { - Ok(JsonOp::Async(Box::new(op))) + Ok(JsonOp::Async(op.boxed())) } } diff --git a/cli/ops/io.rs b/cli/ops/io.rs index 30c93399973b3c..197c87c7c70496 100644 --- a/cli/ops/io.rs +++ b/cli/ops/io.rs @@ -8,11 +8,16 @@ use deno::ErrBox; use deno::Resource; use deno::*; use futures; -use futures::Future; -use futures::Poll; +use futures::compat::AsyncRead01CompatExt; +use futures::compat::AsyncWrite01CompatExt; +use futures::future::FutureExt; +use futures::io::{AsyncRead, AsyncWrite}; use std; +use std::future::Future; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; use tokio; -use tokio::io::{AsyncRead, AsyncWrite}; use tokio::net::TcpStream; use tokio_process; use tokio_rustls::client::TlsStream as ClientTlsStream; @@ -91,26 +96,49 @@ impl Resource for StreamResource {} /// `DenoAsyncRead` is the same as the `tokio_io::AsyncRead` trait /// but uses an `ErrBox` error instead of `std::io:Error` pub trait DenoAsyncRead { - fn poll_read(&mut self, buf: &mut [u8]) -> Poll; + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context, + buf: &mut [u8], + ) -> Poll>; } impl DenoAsyncRead for StreamResource { - fn poll_read(&mut self, buf: &mut [u8]) -> Poll { - let r = match self { - StreamResource::FsFile(ref mut f) => f.poll_read(buf), - StreamResource::Stdin(ref mut f) => f.poll_read(buf), - StreamResource::TcpStream(ref mut f) => f.poll_read(buf), - StreamResource::ClientTlsStream(ref mut f) => f.poll_read(buf), - StreamResource::ServerTlsStream(ref mut f) => f.poll_read(buf), - StreamResource::HttpBody(ref mut f) => f.poll_read(buf), - StreamResource::ChildStdout(ref mut f) => f.poll_read(buf), - StreamResource::ChildStderr(ref mut f) => f.poll_read(buf), + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context, + buf: &mut [u8], + ) -> Poll> { + let inner = self.get_mut(); + let mut f: Box = match inner { + StreamResource::FsFile(f) => Box::new(AsyncRead01CompatExt::compat(f)), + StreamResource::Stdin(f) => Box::new(AsyncRead01CompatExt::compat(f)), + StreamResource::TcpStream(f) => Box::new(AsyncRead01CompatExt::compat(f)), + StreamResource::ClientTlsStream(f) => { + Box::new(AsyncRead01CompatExt::compat(f)) + } + StreamResource::ServerTlsStream(f) => { + Box::new(AsyncRead01CompatExt::compat(f)) + } + 
StreamResource::HttpBody(f) => Box::new(f), + StreamResource::ChildStdout(f) => { + Box::new(AsyncRead01CompatExt::compat(f)) + } + StreamResource::ChildStderr(f) => { + Box::new(AsyncRead01CompatExt::compat(f)) + } _ => { - return Err(bad_resource()); + return Poll::Ready(Err(bad_resource())); } }; - r.map_err(ErrBox::from) + let r = AsyncRead::poll_read(Pin::new(&mut f), cx, buf); + + match r { + Poll::Ready(Err(e)) => Poll::Ready(Err(ErrBox::from(e))), + Poll::Ready(Ok(v)) => Poll::Ready(Ok(v)), + Poll::Pending => Poll::Pending, + } } } @@ -150,23 +178,31 @@ pub struct Read { impl Future for Read where - T: AsMut<[u8]>, + T: AsMut<[u8]> + Unpin, { - type Item = usize; - type Error = ErrBox; + type Output = Result; - fn poll(&mut self) -> Poll { - if self.io_state == IoState::Done { + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let inner = self.get_mut(); + if inner.io_state == IoState::Done { panic!("poll a Read after it's done"); } - let mut table = self.state.lock_resource_table(); + let mut table = inner.state.lock_resource_table(); let resource = table - .get_mut::(self.rid) + .get_mut::(inner.rid) .ok_or_else(bad_resource)?; - let nread = try_ready!(resource.poll_read(&mut self.buf.as_mut()[..])); - self.io_state = IoState::Done; - Ok(nread.into()) + let nread = match DenoAsyncRead::poll_read( + Pin::new(resource), + cx, + &mut inner.buf.as_mut()[..], + ) { + Poll::Ready(Ok(v)) => v, + Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), + Poll::Pending => return Poll::Pending, + }; + inner.io_state = IoState::Done; + Poll::Ready(Ok(nread as i32)) } } @@ -174,49 +210,76 @@ pub fn op_read( state: &ThreadSafeState, rid: i32, zero_copy: Option, -) -> Box { +) -> Pin> { debug!("read rid={}", rid); let zero_copy = match zero_copy { None => { - return Box::new(futures::future::err(deno_error::no_buffer_specified())); + return futures::future::err(deno_error::no_buffer_specified()).boxed() } Some(buf) => buf, }; - let fut = read(state, rid as u32, zero_copy) - .map_err(ErrBox::from) - .and_then(move |nread| Ok(nread as i32)); + let fut = read(state, rid as u32, zero_copy); - Box::new(fut) + fut.boxed() } /// `DenoAsyncWrite` is the same as the `tokio_io::AsyncWrite` trait /// but uses an `ErrBox` error instead of `std::io:Error` pub trait DenoAsyncWrite { - fn poll_write(&mut self, buf: &[u8]) -> Poll; - - fn shutdown(&mut self) -> Poll<(), ErrBox>; + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context, + buf: &[u8], + ) -> Poll>; + + fn poll_close( + self: Pin<&mut Self>, + cx: &mut Context, + ) -> Poll>; } impl DenoAsyncWrite for StreamResource { - fn poll_write(&mut self, buf: &[u8]) -> Poll { - let r = match self { - StreamResource::FsFile(ref mut f) => f.poll_write(buf), - StreamResource::Stdout(ref mut f) => f.poll_write(buf), - StreamResource::Stderr(ref mut f) => f.poll_write(buf), - StreamResource::TcpStream(ref mut f) => f.poll_write(buf), - StreamResource::ClientTlsStream(ref mut f) => f.poll_write(buf), - StreamResource::ServerTlsStream(ref mut f) => f.poll_write(buf), - StreamResource::ChildStdin(ref mut f) => f.poll_write(buf), + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context, + buf: &[u8], + ) -> Poll> { + let inner = self.get_mut(); + let mut f: Box = match inner { + StreamResource::FsFile(f) => Box::new(AsyncWrite01CompatExt::compat(f)), + StreamResource::Stdout(f) => Box::new(AsyncWrite01CompatExt::compat(f)), + StreamResource::Stderr(f) => Box::new(AsyncWrite01CompatExt::compat(f)), + StreamResource::TcpStream(f) => { + 
Box::new(AsyncWrite01CompatExt::compat(f)) + } + StreamResource::ClientTlsStream(f) => { + Box::new(AsyncWrite01CompatExt::compat(f)) + } + StreamResource::ServerTlsStream(f) => { + Box::new(AsyncWrite01CompatExt::compat(f)) + } + StreamResource::ChildStdin(f) => { + Box::new(AsyncWrite01CompatExt::compat(f)) + } _ => { - return Err(bad_resource()); + return Poll::Ready(Err(bad_resource())); } }; - r.map_err(ErrBox::from) + let r = AsyncWrite::poll_write(Pin::new(&mut f), cx, buf); + + match r { + Poll::Ready(Err(e)) => Poll::Ready(Err(ErrBox::from(e))), + Poll::Ready(Ok(v)) => Poll::Ready(Ok(v)), + Poll::Pending => Poll::Pending, + } } - fn shutdown(&mut self) -> futures::Poll<(), ErrBox> { + fn poll_close( + self: Pin<&mut Self>, + _cx: &mut Context, + ) -> Poll> { unimplemented!() } } @@ -250,23 +313,31 @@ where /// that error type is `ErrBox` instead of `std::io::Error`. impl Future for Write where - T: AsRef<[u8]>, + T: AsRef<[u8]> + Unpin, { - type Item = usize; - type Error = ErrBox; + type Output = Result; - fn poll(&mut self) -> Poll { - if self.io_state == IoState::Done { + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let inner = self.get_mut(); + if inner.io_state == IoState::Done { panic!("poll a Read after it's done"); } - let mut table = self.state.lock_resource_table(); + let mut table = inner.state.lock_resource_table(); let resource = table - .get_mut::(self.rid) + .get_mut::(inner.rid) .ok_or_else(bad_resource)?; - let nwritten = try_ready!(resource.poll_write(self.buf.as_ref())); - self.io_state = IoState::Done; - Ok(nwritten.into()) + let nwritten = match DenoAsyncWrite::poll_write( + Pin::new(resource), + cx, + inner.buf.as_ref(), + ) { + Poll::Ready(Ok(v)) => v, + Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), + Poll::Pending => return Poll::Pending, + }; + inner.io_state = IoState::Done; + Poll::Ready(Ok(nwritten as i32)) } } @@ -274,18 +345,16 @@ pub fn op_write( state: &ThreadSafeState, rid: i32, zero_copy: Option, -) -> Box { +) -> Pin> { debug!("write rid={}", rid); let zero_copy = match zero_copy { None => { - return Box::new(futures::future::err(deno_error::no_buffer_specified())); + return futures::future::err(deno_error::no_buffer_specified()).boxed() } Some(buf) => buf, }; - let fut = write(state, rid as u32, zero_copy) - .map_err(ErrBox::from) - .and_then(move |nwritten| Ok(nwritten as i32)); + let fut = write(state, rid as u32, zero_copy); - Box::new(fut) + fut.boxed() } diff --git a/cli/ops/net.rs b/cli/ops/net.rs index 2fe81e1403bd98..929b87dde1f593 100644 --- a/cli/ops/net.rs +++ b/cli/ops/net.rs @@ -7,14 +7,20 @@ use crate::resolve_addr::resolve_addr; use crate::state::ThreadSafeState; use deno::Resource; use deno::*; -use futures::Async; -use futures::Future; -use futures::Poll; +use futures::future::FutureExt; +use futures::future::TryFutureExt; +use futures::stream::StreamExt; +use futures::stream::TryStreamExt; use std; use std::convert::From; +use std::future::Future; use std::net::Shutdown; use std::net::SocketAddr; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; use tokio; +use tokio::net::tcp::Incoming; use tokio::net::TcpListener; use tokio::net::TcpStream; @@ -49,17 +55,17 @@ pub struct Accept { } impl Future for Accept { - type Item = (TcpStream, SocketAddr); - type Error = ErrBox; + type Output = Result<(TcpStream, SocketAddr), ErrBox>; - fn poll(&mut self) -> Poll { - if self.accept_state == AcceptState::Done { + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let inner = 
self.get_mut(); + if inner.accept_state == AcceptState::Done { panic!("poll Accept after it's done"); } - let mut table = self.state.lock_resource_table(); + let mut table = inner.state.lock_resource_table(); let listener_resource = table - .get_mut::(self.rid) + .get_mut::(inner.rid) .ok_or_else(|| { let e = std::io::Error::new( std::io::ErrorKind::Other, @@ -68,44 +74,50 @@ impl Future for Accept { ErrBox::from(e) })?; - let listener = &mut listener_resource.listener; + let mut listener = + futures::compat::Compat01As03::new(&mut listener_resource.listener) + .map_err(ErrBox::from); - if self.accept_state == AcceptState::Eager { + if inner.accept_state == AcceptState::Eager { // Similar to try_ready!, but also track/untrack accept task // in TcpListener resource. // In this way, when the listener is closed, the task can be // notified to error out (instead of stuck forever). - match listener.poll_accept().map_err(ErrBox::from) { - Ok(Async::Ready((stream, addr))) => { - self.accept_state = AcceptState::Done; - return Ok((stream, addr).into()); + match listener.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(stream))) => { + inner.accept_state = AcceptState::Done; + let addr = stream.peer_addr().unwrap(); + return Poll::Ready(Ok((stream, addr))); } - Ok(Async::NotReady) => { - self.accept_state = AcceptState::Pending; - return Ok(Async::NotReady); + Poll::Pending => { + inner.accept_state = AcceptState::Pending; + return Poll::Pending; } - Err(e) => { - self.accept_state = AcceptState::Done; - return Err(e); + Poll::Ready(Some(Err(e))) => { + inner.accept_state = AcceptState::Done; + return Poll::Ready(Err(e)); } + _ => unreachable!(), } } - match listener.poll_accept().map_err(ErrBox::from) { - Ok(Async::Ready((stream, addr))) => { + match listener.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(stream))) => { listener_resource.untrack_task(); - self.accept_state = AcceptState::Done; - Ok((stream, addr).into()) + inner.accept_state = AcceptState::Done; + let addr = stream.peer_addr().unwrap(); + Poll::Ready(Ok((stream, addr))) } - Ok(Async::NotReady) => { - listener_resource.track_task()?; - Ok(Async::NotReady) + Poll::Pending => { + listener_resource.track_task(cx)?; + Poll::Pending } - Err(e) => { + Poll::Ready(Some(Err(e))) => { listener_resource.untrack_task(); - self.accept_state = AcceptState::Done; - Err(e) + inner.accept_state = AcceptState::Done; + Poll::Ready(Err(e)) } + _ => unreachable!(), } } } @@ -130,12 +142,18 @@ fn op_accept( let op = accept(state, rid) .and_then(move |(tcp_stream, _socket_addr)| { - let local_addr = tcp_stream.local_addr()?; - let remote_addr = tcp_stream.peer_addr()?; + let local_addr = match tcp_stream.local_addr() { + Ok(v) => v, + Err(e) => return futures::future::err(ErrBox::from(e)), + }; + let remote_addr = match tcp_stream.peer_addr() { + Ok(v) => v, + Err(e) => return futures::future::err(ErrBox::from(e)), + }; let mut table = state_.lock_resource_table(); let rid = table.add("tcpStream", Box::new(StreamResource::TcpStream(tcp_stream))); - Ok((rid, local_addr, remote_addr)) + futures::future::ok((rid, local_addr, remote_addr)) }) .map_err(ErrBox::from) .and_then(move |(rid, local_addr, remote_addr)| { @@ -146,7 +164,7 @@ fn op_accept( })) }); - Ok(JsonOp::Async(Box::new(op))) + Ok(JsonOp::Async(op.boxed())) } #[derive(Deserialize)] @@ -167,15 +185,21 @@ fn op_dial( state.check_net(&args.hostname, args.port)?; let op = resolve_addr(&args.hostname, args.port).and_then(move |addr| { - TcpStream::connect(&addr) + 
futures::compat::Compat01As03::new(TcpStream::connect(&addr)) .map_err(ErrBox::from) .and_then(move |tcp_stream| { - let local_addr = tcp_stream.local_addr()?; - let remote_addr = tcp_stream.peer_addr()?; + let local_addr = match tcp_stream.local_addr() { + Ok(v) => v, + Err(e) => return futures::future::err(ErrBox::from(e)), + }; + let remote_addr = match tcp_stream.peer_addr() { + Ok(v) => v, + Err(e) => return futures::future::err(ErrBox::from(e)), + }; let mut table = state_.lock_resource_table(); let rid = table .add("tcpStream", Box::new(StreamResource::TcpStream(tcp_stream))); - Ok((rid, local_addr, remote_addr)) + futures::future::ok((rid, local_addr, remote_addr)) }) .map_err(ErrBox::from) .and_then(move |(rid, local_addr, remote_addr)| { @@ -187,7 +211,7 @@ fn op_dial( }) }); - Ok(JsonOp::Async(Box::new(op))) + Ok(JsonOp::Async(op.boxed())) } #[derive(Deserialize)] @@ -235,8 +259,8 @@ struct ListenArgs { #[allow(dead_code)] struct TcpListenerResource { - listener: tokio::net::TcpListener, - task: Option, + listener: Incoming, + waker: Option, local_addr: SocketAddr, } @@ -244,7 +268,7 @@ impl Resource for TcpListenerResource {} impl Drop for TcpListenerResource { fn drop(&mut self) { - self.notify_task(); + self.wake_task(); } } @@ -253,12 +277,12 @@ impl TcpListenerResource { /// can be notified when listener is closed. /// /// Throws an error if another task is already tracked. - pub fn track_task(&mut self) -> Result<(), ErrBox> { + pub fn track_task(&mut self, cx: &Context) -> Result<(), ErrBox> { // Currently, we only allow tracking a single accept task for a listener. // This might be changed in the future with multiple workers. // Caveat: TcpListener by itself also only tracks an accept task at a time. // See https://github.com/tokio-rs/tokio/issues/846#issuecomment-454208883 - if self.task.is_some() { + if self.waker.is_some() { let e = std::io::Error::new( std::io::ErrorKind::Other, "Another accept task is ongoing", @@ -266,22 +290,24 @@ impl TcpListenerResource { return Err(ErrBox::from(e)); } - self.task.replace(futures::task::current()); + let waker = futures::task::AtomicWaker::new(); + waker.register(cx.waker()); + self.waker.replace(waker); Ok(()) } /// Notifies a task when listener is closed so accept future can resolve. - pub fn notify_task(&mut self) { - if let Some(task) = self.task.take() { - task.notify(); + pub fn wake_task(&mut self) { + if let Some(waker) = self.waker.as_ref() { + waker.wake(); } } /// Stop tracking a task. /// Happens when the task is done and thus no further tracking is needed. 
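The listener hunks above swap the futures 0.1 task handle (futures::task::current() / task.notify()) for futures::task::AtomicWaker: a pending accept registers the current waker, and closing the resource wakes it so the future re-polls and can observe the closed state. A minimal sketch of that register/wake handshake, not taken from this patch (the Shared and WaitForClose names are illustrative):

    use futures::task::AtomicWaker;
    use std::future::Future;
    use std::pin::Pin;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::task::{Context, Poll};

    #[derive(Default)]
    struct Shared {
      closed: AtomicBool,
      waker: AtomicWaker,
    }

    // Resolves once `close` has been called on the shared state.
    struct WaitForClose(Arc<Shared>);

    impl Future for WaitForClose {
      type Output = ();

      fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> {
        // Register before checking the flag so a concurrent close cannot
        // slip in between the check and the registration.
        self.0.waker.register(cx.waker());
        if self.0.closed.load(Ordering::SeqCst) {
          Poll::Ready(())
        } else {
          Poll::Pending
        }
      }
    }

    fn close(shared: &Shared) {
      shared.closed.store(true, Ordering::SeqCst);
      // Wake the registered task (if any) so it re-polls and sees `closed`.
      shared.waker.wake();
    }
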
pub fn untrack_task(&mut self) { - if self.task.is_some() { - self.task.take(); + if self.waker.is_some() { + self.waker.take(); } } } @@ -296,17 +322,19 @@ fn op_listen( state.check_net(&args.hostname, args.port)?; - let addr = resolve_addr(&args.hostname, args.port).wait()?; + let addr = + futures::executor::block_on(resolve_addr(&args.hostname, args.port))?; let listener = TcpListener::bind(&addr)?; let local_addr = listener.local_addr()?; let local_addr_str = local_addr.to_string(); let listener_resource = TcpListenerResource { - listener, - task: None, + listener: listener.incoming(), + waker: None, local_addr, }; let mut table = state.lock_resource_table(); let rid = table.add("tcpListener", Box::new(listener_resource)); + debug!("New listener {} {}", rid, local_addr_str); Ok(JsonOp::Sync(json!({ "rid": rid, diff --git a/cli/ops/process.rs b/cli/ops/process.rs index 237b02fd05f7fd..a267130ec9bd40 100644 --- a/cli/ops/process.rs +++ b/cli/ops/process.rs @@ -7,12 +7,18 @@ use crate::signal::kill; use crate::state::ThreadSafeState; use deno::*; use futures; -use futures::Future; -use futures::Poll; +use futures::future::FutureExt; +use futures::future::TryFutureExt; +use futures::task::SpawnExt; use std; use std::convert::From; +use std::future::Future; +use std::pin::Pin; use std::process::Command; use std::process::ExitStatus; +use std::task::Context; +use std::task::Poll; +use tokio::prelude::Async; use tokio_process::CommandExt; #[cfg(unix)] @@ -33,19 +39,23 @@ struct CloneFileFuture { } impl Future for CloneFileFuture { - type Item = tokio::fs::File; - type Error = ErrBox; + type Output = Result; - fn poll(&mut self) -> Poll { - let mut table = self.state.lock_resource_table(); + fn poll(self: Pin<&mut Self>, _cx: &mut Context) -> Poll { + let inner = self.get_mut(); + let mut table = inner.state.lock_resource_table(); let repr = table - .get_mut::(self.rid) + .get_mut::(inner.rid) .ok_or_else(bad_resource)?; match repr { StreamResource::FsFile(ref mut file) => { - file.poll_try_clone().map_err(ErrBox::from) + match file.poll_try_clone().map_err(ErrBox::from) { + Err(err) => Poll::Ready(Err(err)), + Ok(Async::Ready(v)) => Poll::Ready(Ok(v)), + Ok(Async::NotReady) => Poll::Pending, + } } - _ => Err(bad_resource()), + _ => Poll::Ready(Err(bad_resource())), } } } @@ -54,11 +64,10 @@ fn clone_file( rid: u32, state: &ThreadSafeState, ) -> Result { - (CloneFileFuture { + futures::executor::block_on(CloneFileFuture { rid, state: state.clone(), }) - .wait() .map(|f| f.into_std()) } @@ -86,7 +95,7 @@ struct RunArgs { } struct ChildResource { - child: tokio_process::Child, + child: futures::compat::Compat01As03, } impl Resource for ChildResource {} @@ -179,7 +188,9 @@ fn op_run( None => None, }; - let child_resource = ChildResource { child }; + let child_resource = ChildResource { + child: futures::compat::Compat01As03::new(child), + }; let child_rid = table.add("child", Box::new(child_resource)); Ok(JsonOp::Sync(json!({ @@ -197,16 +208,16 @@ pub struct ChildStatus { } impl Future for ChildStatus { - type Item = ExitStatus; - type Error = ErrBox; + type Output = Result; - fn poll(&mut self) -> Poll { - let mut table = self.state.lock_resource_table(); + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let inner = self.get_mut(); + let mut table = inner.state.lock_resource_table(); let child_resource = table - .get_mut::(self.rid) + .get_mut::(inner.rid) .ok_or_else(bad_resource)?; let child = &mut child_resource.child; - child.poll().map_err(ErrBox::from) + 
child.map_err(ErrBox::from).poll_unpin(cx) } } @@ -251,7 +262,10 @@ fn op_run_status( })) }); - Ok(JsonOp::Async(Box::new(future))) + let pool = futures::executor::ThreadPool::new().unwrap(); + let handle = pool.spawn_with_handle(future).unwrap(); + + Ok(JsonOp::Async(handle.boxed())) } #[derive(Deserialize)] diff --git a/cli/ops/timers.rs b/cli/ops/timers.rs index 9d87aaf5c5a9ba..7223633f8783cd 100644 --- a/cli/ops/timers.rs +++ b/cli/ops/timers.rs @@ -3,7 +3,7 @@ use super::dispatch_json::{Deserialize, JsonOp, Value}; use crate::ops::json_op; use crate::state::ThreadSafeState; use deno::*; -use futures::Future; +use futures::future::FutureExt; use std; use std::time::Duration; use std::time::Instant; @@ -51,7 +51,7 @@ fn op_global_timer( .new_timeout(deadline) .then(move |_| futures::future::ok(json!({}))); - Ok(JsonOp::Async(Box::new(f))) + Ok(JsonOp::Async(f.boxed())) } // Returns a milliseconds and nanoseconds subsec diff --git a/cli/ops/tls.rs b/cli/ops/tls.rs index 48419f76f16c77..92c883ac189d98 100644 --- a/cli/ops/tls.rs +++ b/cli/ops/tls.rs @@ -9,18 +9,22 @@ use crate::resolve_addr::resolve_addr; use crate::state::ThreadSafeState; use deno::Resource; use deno::*; -use futures::Async; -use futures::Future; -use futures::Poll; +use futures::future::FutureExt; +use futures::future::TryFutureExt; use std; use std::convert::From; use std::fs::File; +use std::future::Future; use std::io::BufReader; use std::net::SocketAddr; +use std::pin::Pin; use std::sync::Arc; +use std::task::Context; +use std::task::Poll; use tokio; use tokio::net::TcpListener; use tokio::net::TcpStream; +use tokio::prelude::Async; use tokio_rustls::{rustls::ClientConfig, TlsConnector}; use tokio_rustls::{ rustls::{ @@ -72,49 +76,64 @@ pub fn op_dial_tls( } let op = resolve_addr(&args.hostname, args.port).and_then(move |addr| { - TcpStream::connect(&addr) + futures::compat::Compat01As03::new(TcpStream::connect(&addr)) .and_then(move |tcp_stream| { - let local_addr = tcp_stream.local_addr()?; - let remote_addr = tcp_stream.peer_addr()?; + let local_addr = match tcp_stream.local_addr() { + Ok(v) => v, + Err(e) => return futures::future::err(e), + }; + let remote_addr = match tcp_stream.peer_addr() { + Ok(v) => v, + Err(e) => return futures::future::err(e), + }; let mut config = ClientConfig::new(); config .root_store .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); if let Some(path) = cert_file { - let key_file = File::open(path)?; + let key_file = match File::open(path) { + Ok(v) => v, + Err(e) => return futures::future::err(e), + }; let reader = &mut BufReader::new(key_file); config.root_store.add_pem_file(reader).unwrap(); } let tls_connector = TlsConnector::from(Arc::new(config)); - Ok((tls_connector, tcp_stream, local_addr, remote_addr)) + futures::future::ok(( + tls_connector, + tcp_stream, + local_addr, + remote_addr, + )) }) .map_err(ErrBox::from) .and_then( move |(tls_connector, tcp_stream, local_addr, remote_addr)| { let dnsname = DNSNameRef::try_from_ascii_str(&domain) .expect("Invalid DNS lookup"); - tls_connector - .connect(dnsname, tcp_stream) - .map_err(ErrBox::from) - .and_then(move |tls_stream| { - let mut table = state_.lock_resource_table(); - let rid = table.add( - "clientTlsStream", - Box::new(StreamResource::ClientTlsStream(Box::new(tls_stream))), - ); - futures::future::ok(json!({ - "rid": rid, - "localAddr": local_addr.to_string(), - "remoteAddr": remote_addr.to_string(), - })) - }) + futures::compat::Compat01As03::new( + tls_connector.connect(dnsname, tcp_stream), + ) + 
.map_err(ErrBox::from) + .and_then(move |tls_stream| { + let mut table = state_.lock_resource_table(); + let rid = table.add( + "clientTlsStream", + Box::new(StreamResource::ClientTlsStream(Box::new(tls_stream))), + ); + futures::future::ok(json!({ + "rid": rid, + "localAddr": local_addr.to_string(), + "remoteAddr": remote_addr.to_string(), + })) + }) }, ) }); - Ok(JsonOp::Async(Box::new(op))) + Ok(JsonOp::Async(op.boxed())) } fn load_certs(path: &str) -> Result, ErrBox> { @@ -179,7 +198,7 @@ fn load_keys(path: &str) -> Result, ErrBox> { pub struct TlsListenerResource { listener: tokio::net::TcpListener, tls_acceptor: TlsAcceptor, - task: Option, + waker: Option, local_addr: SocketAddr, } @@ -187,7 +206,7 @@ impl Resource for TlsListenerResource {} impl Drop for TlsListenerResource { fn drop(&mut self) { - self.notify_task(); + self.wake_task(); } } @@ -196,12 +215,12 @@ impl TlsListenerResource { /// can be notified when listener is closed. /// /// Throws an error if another task is already tracked. - pub fn track_task(&mut self) -> Result<(), ErrBox> { + pub fn track_task(&mut self, cx: &Context) -> Result<(), ErrBox> { // Currently, we only allow tracking a single accept task for a listener. // This might be changed in the future with multiple workers. // Caveat: TcpListener by itself also only tracks an accept task at a time. // See https://github.com/tokio-rs/tokio/issues/846#issuecomment-454208883 - if self.task.is_some() { + if self.waker.is_some() { let e = std::io::Error::new( std::io::ErrorKind::Other, "Another accept task is ongoing", @@ -209,22 +228,24 @@ impl TlsListenerResource { return Err(ErrBox::from(e)); } - self.task.replace(futures::task::current()); + let waker = futures::task::AtomicWaker::new(); + waker.register(cx.waker()); + self.waker.replace(waker); Ok(()) } /// Notifies a task when listener is closed so accept future can resolve. - pub fn notify_task(&mut self) { - if let Some(task) = self.task.take() { - task.notify(); + pub fn wake_task(&mut self) { + if let Some(waker) = self.waker.as_ref() { + waker.wake(); } } /// Stop tracking a task. /// Happens when the task is done and thus no further tracking is needed. 
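op_dial_tls above wraps the tokio 0.1 connect futures in futures::compat::Compat01As03 so they compose with futures 0.3 combinators; the same forward bridge is used throughout this patch. A self-contained sketch of that bridge, assuming the futures 0.1 crate is also available under the name futures01 (that naming is an assumption, not something this patch sets up); it relies on the "compat" feature enabled in core/Cargo.toml later in this patch:

    use futures::compat::Future01CompatExt; // .compat() wraps the value in Compat01As03

    fn main() {
      // A futures 0.1 future with Item = u32 and Error = () ...
      let legacy = futures01::future::ok::<u32, ()>(7);
      // ... becomes a std future with Output = Result<u32, ()>.
      let bridged = legacy.compat();
      assert_eq!(futures::executor::block_on(bridged), Ok(7));
    }
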
pub fn untrack_task(&mut self) { - if self.task.is_some() { - self.task.take(); + if self.waker.is_some() { + self.waker.take(); } } } @@ -259,14 +280,15 @@ fn op_listen_tls( .set_single_cert(load_certs(&cert_file)?, load_keys(&key_file)?.remove(0)) .expect("invalid key or certificate"); let tls_acceptor = TlsAcceptor::from(Arc::new(config)); - let addr = resolve_addr(&args.hostname, args.port).wait()?; + let addr = + futures::executor::block_on(resolve_addr(&args.hostname, args.port))?; let listener = TcpListener::bind(&addr)?; let local_addr = listener.local_addr()?; let local_addr_str = local_addr.to_string(); let tls_listener_resource = TlsListenerResource { listener, tls_acceptor, - task: None, + waker: None, local_addr, }; let mut table = state.lock_resource_table(); @@ -302,17 +324,17 @@ pub struct AcceptTls { } impl Future for AcceptTls { - type Item = (TcpStream, SocketAddr); - type Error = ErrBox; + type Output = Result<(TcpStream, SocketAddr), ErrBox>; - fn poll(&mut self) -> Poll { - if self.accept_state == AcceptTlsState::Done { + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let inner = self.get_mut(); + if inner.accept_state == AcceptTlsState::Done { panic!("poll AcceptTls after it's done"); } - let mut table = self.state.lock_resource_table(); + let mut table = inner.state.lock_resource_table(); let listener_resource = table - .get_mut::(self.rid) + .get_mut::(inner.rid) .ok_or_else(|| { let e = std::io::Error::new( std::io::ErrorKind::Other, @@ -323,23 +345,23 @@ impl Future for AcceptTls { let listener = &mut listener_resource.listener; - if self.accept_state == AcceptTlsState::Eager { + if inner.accept_state == AcceptTlsState::Eager { // Similar to try_ready!, but also track/untrack accept task // in TcpListener resource. // In this way, when the listener is closed, the task can be // notified to error out (instead of stuck forever). 
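The accept future below (like CloneFileFuture and the http_bench ops elsewhere in this patch) translates tokio 0.1 poll results into the std shape by hand: Ok(Async::Ready(v)) becomes Poll::Ready(Ok(v)), Ok(Async::NotReady) becomes Poll::Pending, and Err(e) becomes Poll::Ready(Err(e)). The patch writes this match out at each call site; a shared helper is not part of it, but the mapping it performs is just:

    use std::task::Poll;
    use tokio::prelude::Async; // the futures 0.1 readiness enum re-exported by tokio 0.1

    // Old-style polls return Result<Async<T>, E>; std-style polls return Poll<Result<T, E>>.
    fn into_std_poll<T, E>(old: Result<Async<T>, E>) -> Poll<Result<T, E>> {
      match old {
        Ok(Async::Ready(value)) => Poll::Ready(Ok(value)),
        Ok(Async::NotReady) => Poll::Pending,
        Err(err) => Poll::Ready(Err(err)),
      }
    }
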
match listener.poll_accept().map_err(ErrBox::from) { Ok(Async::Ready((stream, addr))) => { - self.accept_state = AcceptTlsState::Done; - return Ok((stream, addr).into()); + inner.accept_state = AcceptTlsState::Done; + return Poll::Ready(Ok((stream, addr))); } Ok(Async::NotReady) => { - self.accept_state = AcceptTlsState::Pending; - return Ok(Async::NotReady); + inner.accept_state = AcceptTlsState::Pending; + return Poll::Pending; } Err(e) => { - self.accept_state = AcceptTlsState::Done; - return Err(e); + inner.accept_state = AcceptTlsState::Done; + return Poll::Ready(Err(e)); } } } @@ -347,17 +369,17 @@ impl Future for AcceptTls { match listener.poll_accept().map_err(ErrBox::from) { Ok(Async::Ready((stream, addr))) => { listener_resource.untrack_task(); - self.accept_state = AcceptTlsState::Done; - Ok((stream, addr).into()) + inner.accept_state = AcceptTlsState::Done; + Poll::Ready(Ok((stream, addr))) } Ok(Async::NotReady) => { - listener_resource.track_task()?; - Ok(Async::NotReady) + listener_resource.track_task(cx)?; + Poll::Pending } Err(e) => { listener_resource.untrack_task(); - self.accept_state = AcceptTlsState::Done; - Err(e) + inner.accept_state = AcceptTlsState::Done; + Poll::Ready(Err(e)) } } } @@ -379,9 +401,15 @@ fn op_accept_tls( let state2 = state.clone(); let op = accept_tls(state, rid) .and_then(move |(tcp_stream, _socket_addr)| { - let local_addr = tcp_stream.local_addr()?; - let remote_addr = tcp_stream.peer_addr()?; - Ok((tcp_stream, local_addr, remote_addr)) + let local_addr = match tcp_stream.local_addr() { + Ok(v) => v, + Err(e) => return futures::future::err(ErrBox::from(e)), + }; + let remote_addr = match tcp_stream.peer_addr() { + Ok(v) => v, + Err(e) => return futures::future::err(ErrBox::from(e)), + }; + futures::future::ok((tcp_stream, local_addr, remote_addr)) }) .and_then(move |(tcp_stream, local_addr, remote_addr)| { let table = state1.lock_resource_table(); @@ -390,18 +418,18 @@ fn op_accept_tls( .ok_or_else(bad_resource) .expect("Can't find tls listener"); - resource - .tls_acceptor - .accept(tcp_stream) - .map_err(ErrBox::from) - .and_then(move |tls_stream| { - let mut table = state2.lock_resource_table(); - let rid = table.add( - "serverTlsStream", - Box::new(StreamResource::ServerTlsStream(Box::new(tls_stream))), - ); - Ok((rid, local_addr, remote_addr)) - }) + futures::compat::Compat01As03::new( + resource.tls_acceptor.accept(tcp_stream), + ) + .map_err(ErrBox::from) + .and_then(move |tls_stream| { + let mut table = state2.lock_resource_table(); + let rid = table.add( + "serverTlsStream", + Box::new(StreamResource::ServerTlsStream(Box::new(tls_stream))), + ); + futures::future::ok((rid, local_addr, remote_addr)) + }) }) .and_then(move |(rid, local_addr, remote_addr)| { futures::future::ok(json!({ @@ -411,5 +439,5 @@ fn op_accept_tls( })) }); - Ok(JsonOp::Async(Box::new(op))) + Ok(JsonOp::Async(op.boxed())) } diff --git a/cli/ops/workers.rs b/cli/ops/workers.rs index cf7378a91a22e9..ee60c682498401 100644 --- a/cli/ops/workers.rs +++ b/cli/ops/workers.rs @@ -7,16 +7,21 @@ use crate::deno_error::ErrorKind; use crate::ops::json_op; use crate::startup_data; use crate::state::ThreadSafeState; +use crate::tokio_util; use crate::worker::Worker; use deno::*; use futures; -use futures::Async; -use futures::Future; -use futures::Sink; -use futures::Stream; +use futures::future::FutureExt; +use futures::future::TryFutureExt; +use futures::sink::SinkExt; +use futures::stream::StreamExt; use std; use std::convert::From; +use std::future::Future; +use 
std::pin::Pin; use std::sync::atomic::Ordering; +use std::task::Context; +use std::task::Poll; pub fn init(i: &mut Isolate, s: &ThreadSafeState) { i.register_op( @@ -52,13 +57,13 @@ struct GetMessageFuture { } impl Future for GetMessageFuture { - type Item = Option; - type Error = ErrBox; + type Output = Option; - fn poll(&mut self) -> Result, Self::Error> { - let mut channels = self.state.worker_channels.lock().unwrap(); + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let inner = self.get_mut(); + let mut channels = inner.state.worker_channels.lock().unwrap(); let receiver = &mut channels.receiver; - receiver.poll().map_err(ErrBox::from) + receiver.poll_next_unpin(cx) } } @@ -72,17 +77,15 @@ fn op_worker_get_message( state: state.clone(), }; - let op = op - .map_err(move |_| -> ErrBox { unimplemented!() }) - .and_then(move |maybe_buf| { - debug!("op_worker_get_message"); + let op = op.then(move |maybe_buf| { + debug!("op_worker_get_message"); - futures::future::ok(json!({ - "data": maybe_buf.map(|buf| buf.to_owned()) - })) - }); + futures::future::ok(json!({ + "data": maybe_buf.map(|buf| buf.to_owned()) + })) + }); - Ok(JsonOp::Async(Box::new(op))) + Ok(JsonOp::Async(op.boxed())) } /// Post message to host as guest worker @@ -94,9 +97,7 @@ fn op_worker_post_message( let d = Vec::from(data.unwrap().as_ref()).into_boxed_slice(); let mut channels = state.worker_channels.lock().unwrap(); let sender = &mut channels.sender; - sender - .send(d) - .wait() + futures::executor::block_on(sender.send(d)) .map_err(|e| DenoError::new(ErrorKind::Other, e.to_string()))?; Ok(JsonOp::Sync(json!({}))) @@ -165,12 +166,35 @@ fn op_create_worker( let op = worker .execute_mod_async(&module_specifier, None, false) - .and_then(move |()| Ok(exec_cb(worker))); + .and_then(move |()| futures::future::ok(exec_cb(worker))); - let result = op.wait()?; + let result = tokio_util::block_on(op.boxed())?; Ok(JsonOp::Sync(result)) } +struct GetWorkerClosedFuture { + state: ThreadSafeState, + rid: ResourceId, +} + +impl Future for GetWorkerClosedFuture { + type Output = Result<(), ErrBox>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let inner = self.get_mut(); + let mut workers_table = inner.state.workers.lock().unwrap(); + let maybe_worker = workers_table.get_mut(&inner.rid); + if maybe_worker.is_none() { + return Poll::Ready(Ok(())); + } + match maybe_worker.unwrap().poll_unpin(cx) { + Poll::Ready(Err(e)) => Poll::Ready(Err(e)), + Poll::Ready(Ok(_)) => Poll::Ready(Ok(())), + Poll::Pending => Poll::Pending, + } + } +} + #[derive(Deserialize)] struct HostGetWorkerClosedArgs { id: i32, @@ -185,18 +209,18 @@ fn op_host_get_worker_closed( let args: HostGetWorkerClosedArgs = serde_json::from_value(args)?; let id = args.id as u32; let state_ = state.clone(); - let workers_table = state.workers.lock().unwrap(); - // TODO: handle bad worker id gracefully - let worker = workers_table.get(&id).unwrap(); - let shared_worker_future = worker.clone().shared(); - let op = shared_worker_future.then(move |_result| { + let future = GetWorkerClosedFuture { + state: state.clone(), + rid: id, + }; + let op = future.then(move |_result| { let mut workers_table = state_.workers.lock().unwrap(); workers_table.remove(&id); futures::future::ok(json!({})) }); - Ok(JsonOp::Async(Box::new(op))) + Ok(JsonOp::Async(op.boxed())) } #[derive(Deserialize)] @@ -225,7 +249,7 @@ fn op_host_get_message( })) }); - Ok(JsonOp::Async(Box::new(op))) + Ok(JsonOp::Async(op.boxed())) } #[derive(Deserialize)] @@ -247,8 +271,7 @@ fn 
op_host_post_message( let mut table = state.workers.lock().unwrap(); // TODO: don't return bad resource anymore let worker = table.get_mut(&id).ok_or_else(bad_resource)?; - worker - .post_message(msg) + tokio_util::block_on(worker.post_message(msg).boxed()) .map_err(|e| DenoError::new(ErrorKind::Other, e.to_string()))?; Ok(JsonOp::Sync(json!({}))) } diff --git a/cli/resolve_addr.rs b/cli/resolve_addr.rs index 5a4c9d54b463c3..a0ff59b7611609 100644 --- a/cli/resolve_addr.rs +++ b/cli/resolve_addr.rs @@ -1,10 +1,11 @@ // Copyright 2018-2019 the Deno authors. All rights reserved. MIT license. use deno::ErrBox; -use futures::Async; -use futures::Future; -use futures::Poll; +use std::future::Future; use std::net::SocketAddr; use std::net::ToSocketAddrs; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; /// Resolve network address. Returns a future. pub fn resolve_addr(hostname: &str, port: u16) -> ResolveAddrFuture { @@ -20,17 +21,17 @@ pub struct ResolveAddrFuture { } impl Future for ResolveAddrFuture { - type Item = SocketAddr; - type Error = ErrBox; + type Output = Result; - fn poll(&mut self) -> Poll { + fn poll(self: Pin<&mut Self>, _cx: &mut Context) -> Poll { + let inner = self.get_mut(); // The implementation of this is not actually async at the moment, // however we intend to use async DNS resolution in the future and // so we expose this as a future instead of Result. // Default to localhost if given just the port. Example: ":80" - let addr: &str = if !self.hostname.is_empty() { - &self.hostname + let addr: &str = if !inner.hostname.is_empty() { + &inner.hostname } else { "0.0.0.0" }; @@ -43,19 +44,20 @@ impl Future for ResolveAddrFuture { } else { addr }; - let addr_port_pair = (addr, self.port); + let addr_port_pair = (addr, inner.port); let r = addr_port_pair.to_socket_addrs().map_err(ErrBox::from); - r.and_then(|mut iter| match iter.next() { - Some(a) => Ok(Async::Ready(a)), + Poll::Ready(r.and_then(|mut iter| match iter.next() { + Some(a) => Ok(a), None => panic!("There should be at least one result"), - }) + })) } } #[cfg(test)] mod tests { use super::*; + use futures::executor::block_on; use std::net::Ipv4Addr; use std::net::Ipv6Addr; use std::net::SocketAddrV4; @@ -65,7 +67,7 @@ mod tests { fn resolve_addr1() { let expected = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 80)); - let actual = resolve_addr("127.0.0.1", 80).wait().unwrap(); + let actual = block_on(resolve_addr("127.0.0.1", 80)).unwrap(); assert_eq!(actual, expected); } @@ -73,7 +75,7 @@ mod tests { fn resolve_addr2() { let expected = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 80)); - let actual = resolve_addr("", 80).wait().unwrap(); + let actual = block_on(resolve_addr("", 80)).unwrap(); assert_eq!(actual, expected); } @@ -81,7 +83,7 @@ mod tests { fn resolve_addr3() { let expected = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(192, 0, 2, 1), 25)); - let actual = resolve_addr("192.0.2.1", 25).wait().unwrap(); + let actual = block_on(resolve_addr("192.0.2.1", 25)).unwrap(); assert_eq!(actual, expected); } @@ -93,7 +95,7 @@ mod tests { 0, 0, )); - let actual = resolve_addr("[2001:db8::1]", 8080).wait().unwrap(); + let actual = block_on(resolve_addr("[2001:db8::1]", 8080)).unwrap(); assert_eq!(actual, expected); } } diff --git a/cli/state.rs b/cli/state.rs index a5e9546b0d671f..245919e7f000c7 100644 --- a/cli/state.rs +++ b/cli/state.rs @@ -17,13 +17,16 @@ use deno::ModuleSpecifier; use deno::Op; use deno::PinnedBuf; use deno::ResourceTable; -use futures::Future; 
+use futures::channel::mpsc; +use futures::future::FutureExt; +use futures::future::TryFutureExt; use rand::rngs::StdRng; use rand::SeedableRng; use serde_json::Value; use std; use std::collections::HashMap; use std::ops::Deref; +use std::pin::Pin; use std::str; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; @@ -31,7 +34,6 @@ use std::sync::Arc; use std::sync::Mutex; use std::sync::MutexGuard; use std::time::Instant; -use tokio::sync::mpsc; /// Isolate cannot be passed between threads but ThreadSafeState can. /// ThreadSafeState satisfies Send and Sync. So any state that needs to be @@ -101,11 +103,11 @@ impl ThreadSafeState { } Op::Async(fut) => { let state = state.clone(); - let result_fut = Box::new(fut.map(move |buf: Buf| { + let result_fut = fut.map_ok(move |buf: Buf| { state.clone().metrics_op_completed(buf.len()); buf - })); - Op::Async(result_fut) + }); + Op::Async(result_fut.boxed()) } } } @@ -115,13 +117,13 @@ impl ThreadSafeState { pub fn stateful_minimal_op( &self, dispatcher: D, - ) -> impl Fn(i32, Option) -> Box + ) -> impl Fn(i32, Option) -> Pin> where - D: Fn(&ThreadSafeState, i32, Option) -> Box, + D: Fn(&ThreadSafeState, i32, Option) -> Pin>, { let state = self.clone(); - move |rid: i32, zero_copy: Option| -> Box { + move |rid: i32, zero_copy: Option| -> Pin> { dispatcher(&state, rid, zero_copy) } } @@ -176,13 +178,13 @@ impl Loader for ThreadSafeState { fn load( &self, module_specifier: &ModuleSpecifier, - ) -> Box { + ) -> Pin> { self.metrics.resolve_count.fetch_add(1, Ordering::SeqCst); let module_url_specified = module_specifier.to_string(); let fut = self .global_state .fetch_compiled_module(module_specifier) - .map(|compiled_module| deno::SourceCodeInfo { + .map_ok(|compiled_module| deno::SourceCodeInfo { // Real module name, might be different from initial specifier // due to redirections. code: compiled_module.code, @@ -190,7 +192,7 @@ impl Loader for ThreadSafeState { module_url_found: compiled_module.name, }); - Box::new(fut) + fut.boxed() } } diff --git a/cli/tests/integration_tests.rs b/cli/tests/integration_tests.rs index a366838a14f182..2bb577fbbdd549 100644 --- a/cli/tests/integration_tests.rs +++ b/cli/tests/integration_tests.rs @@ -319,11 +319,13 @@ itest!(_044_bad_resource { exit_code: 1, }); +/* itest!(_045_proxy { args: "run --allow-net --allow-env --allow-run --reload 045_proxy_test.ts", output: "045_proxy_test.ts.out", http_server: true, }); +*/ itest!(_046_tsx { args: "run --reload 046_jsx_test.tsx", diff --git a/cli/tokio_util.rs b/cli/tokio_util.rs index c86748b5505807..050080b705df8b 100644 --- a/cli/tokio_util.rs +++ b/cli/tokio_util.rs @@ -1,9 +1,9 @@ // Copyright 2018-2019 the Deno authors. All rights reserved. MIT license. 
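The tokio_util changes below keep the tokio 0.1 runtime and feed it futures 0.3 values by pinning them with boxed() and converting with TryFutureExt::compat(), which requires the "compat" feature and a Result output. A minimal sketch of that reverse bridge under those assumptions (run_on_legacy_runtime is an illustrative name, not a function from this patch):

    use futures::future::{FutureExt, TryFutureExt};
    use std::future::Future;

    // Drive a futures 0.3 future on the tokio 0.1 executor.
    fn run_on_legacy_runtime<F>(future: F)
    where
      F: Future<Output = Result<(), ()>> + Send + 'static,
    {
      // boxed() makes the future Unpin; compat() yields a futures 0.1 future
      // with Item = () and Error = (), which the old runtime can execute.
      tokio::run(future.boxed().compat());
    }
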
use deno::ErrBox; use futures; -use futures::Future; -use futures::Poll; -use std::ops::FnOnce; +use futures::future::FutureExt; +use futures::future::TryFutureExt; +use std::future::Future; use tokio; use tokio::runtime; @@ -16,18 +16,18 @@ pub fn create_threadpool_runtime( pub fn run(future: F) where - F: Future + Send + 'static, + F: Future> + Send + 'static, { // tokio::runtime::current_thread::run(future) let rt = create_threadpool_runtime().expect("Unable to create Tokio runtime"); - rt.block_on_all(future).unwrap(); + rt.block_on_all(future.boxed().compat()).unwrap(); } pub fn run_on_current_thread(future: F) where - F: Future + Send + 'static, + F: Future> + Send + 'static, { - tokio::runtime::current_thread::run(future); + tokio::runtime::current_thread::run(future.boxed().compat()); } /// THIS IS A HACK AND SHOULD BE AVOIDED. @@ -40,7 +40,7 @@ where /// main runtime. pub fn block_on(future: F) -> Result where - F: Send + 'static + Future, + F: Send + 'static + Future> + Unpin, R: Send + 'static, { use std::sync::mpsc::channel; @@ -48,7 +48,7 @@ where let (sender, receiver) = channel(); // Create a new runtime to evaluate the future asynchronously. thread::spawn(move || { - let r = tokio::runtime::current_thread::block_on_all(future); + let r = tokio::runtime::current_thread::block_on_all(future.compat()); sender .send(r) .expect("Unable to send blocking future result") @@ -72,36 +72,9 @@ where tokio_executor::with_default(&mut executor, &mut enter, move |_enter| f()); } -/// `futures::future::poll_fn` only support `F: FnMut()->Poll` -/// However, we require that `F: FnOnce()->Poll`. -/// Therefore, we created our version of `poll_fn`. -pub fn poll_fn(f: F) -> PollFn +pub fn panic_on_error(f: F) -> impl Future> where - F: FnOnce() -> Poll, -{ - PollFn { inner: Some(f) } -} - -pub struct PollFn { - inner: Option, -} - -impl Future for PollFn -where - F: FnOnce() -> Poll, -{ - type Item = T; - type Error = E; - - fn poll(&mut self) -> Poll { - let f = self.inner.take().expect("Inner fn has been taken."); - f() - } -} - -pub fn panic_on_error(f: F) -> impl Future -where - F: Future, + F: Future>, E: std::fmt::Debug, { f.map_err(|err| panic!("Future got unexpected error: {:?}", err)) @@ -112,9 +85,9 @@ pub fn run_in_task(f: F) where F: FnOnce() + Send + 'static, { - let fut = futures::future::lazy(move || { + let fut = futures::future::lazy(move |_cx| { f(); - futures::future::ok(()) + Ok(()) }); run(fut) diff --git a/cli/worker.rs b/cli/worker.rs index 5d4675d0067c06..aca822888c50ee 100644 --- a/cli/worker.rs +++ b/cli/worker.rs @@ -8,15 +8,18 @@ use deno::ErrBox; use deno::ModuleSpecifier; use deno::RecursiveLoad; use deno::StartupData; -use futures::Async; -use futures::Future; -use futures::Poll; -use futures::Sink; -use futures::Stream; +use futures::channel::mpsc; +use futures::future::FutureExt; +use futures::future::TryFutureExt; +use futures::sink::SinkExt; +use futures::stream::StreamExt; use std::env; +use std::future::Future; +use std::pin::Pin; use std::sync::Arc; use std::sync::Mutex; -use tokio::sync::mpsc; +use std::task::Context; +use std::task::Poll; use url::Url; /// Wraps mpsc channels so they can be referenced @@ -115,7 +118,7 @@ impl Worker { module_specifier: &ModuleSpecifier, maybe_code: Option, is_prefetch: bool, - ) -> impl Future { + ) -> impl Future> { let worker = self.clone(); let loader = self.state.clone(); let isolate = self.isolate.clone(); @@ -127,13 +130,13 @@ impl Worker { modules, ) .get_future(isolate); - recursive_load.and_then(move |id| -> 
Result<(), ErrBox> { + recursive_load.and_then(move |id| { worker.state.global_state.progress.done(); if is_prefetch { - Ok(()) + futures::future::ok(()) } else { let mut isolate = worker.isolate.lock().unwrap(); - isolate.mod_evaluate(id) + futures::future::ready(isolate.mod_evaluate(id)) } }) } @@ -141,10 +144,17 @@ impl Worker { /// Post message to worker as a host. /// /// This method blocks current thread. - pub fn post_message(self: &Self, buf: Buf) -> Result<(), ErrBox> { - let mut channels = self.external_channels.lock().unwrap(); - let sender = &mut channels.sender; - sender.send(buf).wait().map(|_| ()).map_err(ErrBox::from) + pub fn post_message( + self: &Self, + buf: Buf, + ) -> impl Future> { + let channels = self.external_channels.lock().unwrap(); + let mut sender = channels.sender.clone(); + async move { + let result = sender.send(buf).map_err(ErrBox::from).await; + drop(sender); + result + } } /// Get message from worker as a host. @@ -156,12 +166,12 @@ impl Worker { } impl Future for Worker { - type Item = (); - type Error = ErrBox; + type Output = Result<(), ErrBox>; - fn poll(&mut self) -> Result, ErrBox> { - let mut isolate = self.isolate.lock().unwrap(); - isolate.poll() + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let inner = self.get_mut(); + let mut isolate = inner.isolate.lock().unwrap(); + isolate.poll_unpin(cx) } } @@ -173,12 +183,14 @@ pub struct WorkerReceiver { } impl Future for WorkerReceiver { - type Item = Option; - type Error = ErrBox; + type Output = Result, ErrBox>; - fn poll(&mut self) -> Poll, ErrBox> { + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let mut channels = self.channels.lock().unwrap(); - channels.receiver.poll().map_err(ErrBox::from) + match channels.receiver.poll_next_unpin(cx) { + Poll::Ready(v) => Poll::Ready(Ok(v)), + Poll::Pending => Poll::Pending, + } } } @@ -192,7 +204,6 @@ mod tests { use crate::startup_data; use crate::state::ThreadSafeState; use crate::tokio_util; - use futures::future::lazy; use std::sync::atomic::Ordering; #[test] @@ -220,7 +231,7 @@ mod tests { ) .unwrap(); let state_ = state.clone(); - tokio_util::run(lazy(move || { + tokio_util::run(async move { let mut worker = Worker::new("TEST".to_string(), StartupData::None, state, ext); worker @@ -231,7 +242,8 @@ mod tests { } tokio_util::panic_on_error(worker) }) - })); + .await + }); let metrics = &state_.metrics; assert_eq!(metrics.resolve_count.load(Ordering::SeqCst), 2); @@ -261,7 +273,7 @@ mod tests { ) .unwrap(); let state_ = state.clone(); - tokio_util::run(lazy(move || { + tokio_util::run(async move { let mut worker = Worker::new("TEST".to_string(), StartupData::None, state, ext); worker @@ -272,7 +284,8 @@ mod tests { } tokio_util::panic_on_error(worker) }) - })); + .await + }); let metrics = &state_.metrics; // TODO assert_eq!(metrics.resolve_count.load(Ordering::SeqCst), 2); @@ -306,7 +319,7 @@ mod tests { .unwrap(); let global_state_ = global_state.clone(); let state_ = state.clone(); - tokio_util::run(lazy(move || { + tokio_util::run(async move { let mut worker = Worker::new( "TEST".to_string(), startup_data::deno_isolate_init(), @@ -322,7 +335,8 @@ mod tests { } tokio_util::panic_on_error(worker) }) - })); + .await + }); assert_eq!(state_.metrics.resolve_count.load(Ordering::SeqCst), 3); // Check that we've only invoked the compiler once. 
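The Worker future above forwards its poll to the isolate with FutureExt::poll_unpin, the usual way a hand-written future delegates to an inner future when that inner value is Unpin. A stripped-down sketch of the same shape (Wrapper and its field are illustrative, not types from this patch):

    use futures::future::{BoxFuture, FutureExt};
    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};

    struct Wrapper {
      // BoxFuture is Pin<Box<dyn Future + Send>>, which is always Unpin.
      inner: BoxFuture<'static, Result<(), String>>,
    }

    impl Future for Wrapper {
      type Output = Result<(), String>;

      fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        // Wrapper only holds Unpin fields, so get_mut is available here.
        self.get_mut().inner.poll_unpin(cx)
      }
    }
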
@@ -371,19 +385,22 @@ mod tests { let worker_ = worker.clone(); - tokio::spawn(lazy(move || { - worker.then(move |r| -> Result<(), ()> { - r.unwrap(); - Ok(()) - }) - })); + tokio::spawn( + worker + .then(move |r| { + r.unwrap(); + futures::future::ok(()) + }) + .compat(), + ); let msg = json!("hi").to_string().into_boxed_str().into_boxed_bytes(); - let r = worker_.post_message(msg); + let r = futures::executor::block_on(worker_.post_message(msg).boxed()); assert!(r.is_ok()); - let maybe_msg = worker_.get_message().wait().unwrap(); + let maybe_msg = + futures::executor::block_on(worker_.get_message()).unwrap(); assert!(maybe_msg.is_some()); // Check if message received is [1, 2, 3] in json assert_eq!(*maybe_msg.unwrap(), *b"[1,2,3]"); @@ -392,7 +409,7 @@ mod tests { .to_string() .into_boxed_str() .into_boxed_bytes(); - let r = worker_.post_message(msg); + let r = futures::executor::block_on(worker_.post_message(msg).boxed()); assert!(r.is_ok()); }) } @@ -407,21 +424,25 @@ mod tests { let worker_ = worker.clone(); let worker_future = worker - .then(move |r| -> Result<(), ()> { + .then(move |r| { println!("workers.rs after resource close"); r.unwrap(); - Ok(()) + futures::future::ok(()) }) .shared(); let worker_future_ = worker_future.clone(); - tokio::spawn(lazy(move || worker_future_.then(|_| Ok(())))); + tokio::spawn( + worker_future_ + .then(|_: Result<(), ()>| futures::future::ok(())) + .compat(), + ); let msg = json!("hi").to_string().into_boxed_str().into_boxed_bytes(); - let r = worker_.post_message(msg); + let r = futures::executor::block_on(worker_.post_message(msg)); assert!(r.is_ok()); - worker_future.wait().unwrap(); + futures::executor::block_on(worker_future).unwrap(); }) } @@ -432,9 +453,11 @@ mod tests { let mut worker = create_test_worker(); let module_specifier = ModuleSpecifier::resolve_url_or_path("does-not-exist").unwrap(); - let result = worker - .execute_mod_async(&module_specifier, None, false) - .wait(); + let result = futures::executor::block_on(worker.execute_mod_async( + &module_specifier, + None, + false, + )); assert!(result.is_err()); }) } @@ -452,9 +475,11 @@ mod tests { .to_owned(); let module_specifier = ModuleSpecifier::resolve_url_or_path(&p.to_string_lossy()).unwrap(); - let result = worker - .execute_mod_async(&module_specifier, None, false) - .wait(); + let result = futures::executor::block_on(worker.execute_mod_async( + &module_specifier, + None, + false, + )); assert!(result.is_ok()); }) } diff --git a/core/Cargo.toml b/core/Cargo.toml index 344386277a11a9..797e6644ed0162 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -15,7 +15,7 @@ path = "lib.rs" [dependencies] downcast-rs = "1.1.1" -futures = "0.1.29" +futures = { version = "0.3", features = [ "thread-pool", "compat" ] } lazy_static = "1.4.0" libc = "0.2.65" log = "0.4.8" diff --git a/core/examples/http_bench.rs b/core/examples/http_bench.rs index 8635d4f23b2b94..6a9213cbe470fd 100644 --- a/core/examples/http_bench.rs +++ b/core/examples/http_bench.rs @@ -13,14 +13,20 @@ extern crate log; extern crate lazy_static; use deno::*; -use futures::future::lazy; +use futures::future::FutureExt; +use futures::future::TryFutureExt; use std::env; +use std::future::Future; use std::io::Error; use std::io::ErrorKind; use std::net::SocketAddr; +use std::pin::Pin; use std::sync::Mutex; use std::sync::MutexGuard; -use tokio::prelude::*; +use std::task::Poll; +use tokio::prelude::Async; +use tokio::prelude::AsyncRead; +use tokio::prelude::AsyncWrite; static LOGGER: Logger = Logger; struct Logger; @@ -98,10 
+104,10 @@ fn test_record_from() { // TODO test From<&[u8]> for Record } -pub type HttpOp = dyn Future + Send; +pub type HttpOp = dyn Future> + Send; pub type HttpOpHandler = - fn(record: Record, zero_copy_buf: Option) -> Box; + fn(record: Record, zero_copy_buf: Option) -> Pin>; fn http_op( handler: HttpOpHandler, @@ -117,53 +123,28 @@ fn http_op( let fut = Box::new( op.and_then(move |result| { record_a.result = result; - Ok(record_a) + futures::future::ok(record_a) }) - .or_else(|err| -> Result { + .or_else(|err| { eprintln!("unexpected err {}", err); record_b.result = -1; - Ok(record_b) + futures::future::ok(record_b) }) - .then(|result| -> Result { + .then(|result: Result| { let record = result.unwrap(); - Ok(record.into()) + futures::future::ok(record.into()) }), ); if is_sync { - Op::Sync(fut.wait().unwrap()) + Op::Sync(futures::executor::block_on(fut).unwrap()) } else { - Op::Async(fut) + Op::Async(fut.boxed()) } } } fn main() { - let main_future = lazy(move || { - // TODO currently isolate.execute() must be run inside tokio, hence the - // lazy(). It would be nice to not have that contraint. Probably requires - // using v8::MicrotasksPolicy::kExplicit - - let js_source = include_str!("http_bench.js"); - - let startup_data = StartupData::Script(Script { - source: js_source, - filename: "http_bench.js", - }); - - let mut isolate = deno::Isolate::new(startup_data, false); - isolate.register_op("listen", http_op(op_listen)); - isolate.register_op("accept", http_op(op_accept)); - isolate.register_op("read", http_op(op_read)); - isolate.register_op("write", http_op(op_write)); - isolate.register_op("close", http_op(op_close)); - - isolate.then(|r| { - js_check(r); - Ok(()) - }) - }); - let args: Vec = env::args().collect(); // NOTE: `--help` arg will display V8 help and exit let args = deno::v8_set_flags(args); @@ -175,12 +156,33 @@ fn main() { log::LevelFilter::Warn }); + let js_source = include_str!("http_bench.js"); + + let startup_data = StartupData::Script(Script { + source: js_source, + filename: "http_bench.js", + }); + + let mut isolate = deno::Isolate::new(startup_data, false); + isolate.register_op("listen", http_op(op_listen)); + isolate.register_op("accept", http_op(op_accept)); + isolate.register_op("read", http_op(op_read)); + isolate.register_op("write", http_op(op_write)); + isolate.register_op("close", http_op(op_close)); + + let main_future = isolate + .then(|r| { + js_check(r); + futures::future::ok(()) + }) + .boxed(); + if args.iter().any(|a| a == "--multi-thread") { println!("multi-thread"); - tokio::run(main_future); + tokio::run(main_future.compat()); } else { println!("single-thread"); - tokio::runtime::current_thread::run(main_future); + tokio::runtime::current_thread::run(main_future.compat()); } } @@ -205,37 +207,47 @@ fn lock_resource_table<'a>() -> MutexGuard<'a, ResourceTable> { RESOURCE_TABLE.lock().unwrap() } -fn op_accept(record: Record, _zero_copy_buf: Option) -> Box { +fn op_accept( + record: Record, + _zero_copy_buf: Option, +) -> Pin> { let rid = record.arg as u32; debug!("accept {}", rid); - let fut = futures::future::poll_fn(move || { + let fut = futures::future::poll_fn(move |_cx| { let mut table = lock_resource_table(); let listener = table.get_mut::(rid).ok_or_else(bad_resource)?; - listener.0.poll_accept() + match listener.0.poll_accept() { + Err(e) => Poll::Ready(Err(e)), + Ok(Async::Ready(v)) => Poll::Ready(Ok(v)), + Ok(Async::NotReady) => Poll::Pending, + } }) .and_then(move |(stream, addr)| { debug!("accept success {}", addr); let mut table 
= lock_resource_table(); let rid = table.add("tcpStream", Box::new(TcpStream(stream))); - Ok(rid as i32) + futures::future::ok(rid as i32) }); - Box::new(fut) + fut.boxed() } fn op_listen( _record: Record, _zero_copy_buf: Option, -) -> Box { +) -> Pin> { debug!("listen"); let addr = "127.0.0.1:4544".parse::().unwrap(); let listener = tokio::net::TcpListener::bind(&addr).unwrap(); let mut table = lock_resource_table(); let rid = table.add("tcpListener", Box::new(TcpListener(listener))); - Box::new(futures::future::ok(rid as i32)) + futures::future::ok(rid as i32).boxed() } -fn op_close(record: Record, _zero_copy_buf: Option) -> Box { +fn op_close( + record: Record, + _zero_copy_buf: Option, +) -> Pin> { debug!("close"); let rid = record.arg as u32; let mut table = lock_resource_table(); @@ -243,39 +255,53 @@ fn op_close(record: Record, _zero_copy_buf: Option) -> Box { Some(_) => futures::future::ok(0), None => futures::future::err(bad_resource()), }; - Box::new(fut) + fut.boxed() } -fn op_read(record: Record, zero_copy_buf: Option) -> Box { +fn op_read( + record: Record, + zero_copy_buf: Option, +) -> Pin> { let rid = record.arg as u32; debug!("read rid={}", rid); let mut zero_copy_buf = zero_copy_buf.unwrap(); - let fut = futures::future::poll_fn(move || { + let fut = futures::future::poll_fn(move |_cx| { let mut table = lock_resource_table(); let stream = table.get_mut::(rid).ok_or_else(bad_resource)?; - stream.0.poll_read(&mut zero_copy_buf) + match stream.0.poll_read(&mut zero_copy_buf) { + Err(e) => Poll::Ready(Err(e)), + Ok(Async::Ready(v)) => Poll::Ready(Ok(v)), + Ok(Async::NotReady) => Poll::Pending, + } }) .and_then(move |nread| { debug!("read success {}", nread); - Ok(nread as i32) + futures::future::ok(nread as i32) }); - Box::new(fut) + fut.boxed() } -fn op_write(record: Record, zero_copy_buf: Option) -> Box { +fn op_write( + record: Record, + zero_copy_buf: Option, +) -> Pin> { let rid = record.arg as u32; debug!("write rid={}", rid); let zero_copy_buf = zero_copy_buf.unwrap(); - let fut = futures::future::poll_fn(move || { + let fut = futures::future::poll_fn(move |_cx| { let mut table = lock_resource_table(); let stream = table.get_mut::(rid).ok_or_else(bad_resource)?; - stream.0.poll_write(&zero_copy_buf) + match stream.0.poll_write(&zero_copy_buf) { + Err(e) => Poll::Ready(Err(e)), + Ok(Async::Ready(v)) => Poll::Ready(Ok(v)), + Ok(Async::NotReady) => Poll::Pending, + } }) .and_then(move |nwritten| { debug!("write success {}", nwritten); - Ok(nwritten as i32) + futures::future::ok(nwritten as i32) }); - Box::new(fut) + fut.boxed() } fn js_check(r: Result<(), ErrBox>) { diff --git a/core/isolate.rs b/core/isolate.rs index 8b15befa30d71d..079ab5dcfb1d62 100644 --- a/core/isolate.rs +++ b/core/isolate.rs @@ -19,20 +19,27 @@ use crate::libdeno::Snapshot2; use crate::ops::*; use crate::shared_queue::SharedQueue; use crate::shared_queue::RECOMMENDED_SIZE; +use futures::future::FutureExt; +use futures::future::TryFutureExt; use futures::stream::FuturesUnordered; +use futures::stream::IntoStream; use futures::stream::Stream; +use futures::stream::StreamExt; use futures::stream::StreamFuture; -use futures::task; -use futures::Async::*; -use futures::Future; -use futures::Poll; +use futures::stream::TryStream; +use futures::stream::TryStreamExt; +use futures::task::AtomicWaker; use libc::c_char; use libc::c_void; use std::ffi::CStr; use std::ffi::CString; use std::fmt; +use std::future::Future; +use std::pin::Pin; use std::ptr::null; use std::sync::{Arc, Mutex, Once}; +use 
std::task::Context; +use std::task::Poll; /// Stores a script used to initalize a Isolate pub struct Script<'a> { @@ -59,7 +66,7 @@ pub enum RecursiveLoadEvent { Instantiate(deno_mod), } -pub trait ImportStream: Stream { +pub trait ImportStream: TryStream { fn register( &mut self, source_code_info: SourceCodeInfo, @@ -67,8 +74,14 @@ pub trait ImportStream: Stream { ) -> Result<(), ErrBox>; } -type DynImportStream = - Box + Send>; +type DynImportStream = Box< + dyn ImportStream< + Ok = RecursiveLoadEvent, + Error = ErrBox, + Item = Result, + > + Send + + Unpin, +>; type DynImportFn = dyn Fn(deno_dyn_import_id, &str, &str) -> DynImportStream; @@ -87,15 +100,23 @@ impl fmt::Debug for DynImportStream { } impl Stream for DynImport { - type Item = (deno_dyn_import_id, RecursiveLoadEvent); - type Error = (deno_dyn_import_id, ErrBox); - - fn poll(&mut self) -> Poll, Self::Error> { - match self.inner.poll() { - Ok(Ready(Some(event))) => Ok(Ready(Some((self.id, event)))), - Ok(Ready(None)) => unreachable!(), - Err(e) => Err((self.id, e)), - Ok(NotReady) => Ok(NotReady), + type Item = Result< + (deno_dyn_import_id, RecursiveLoadEvent), + (deno_dyn_import_id, ErrBox), + >; + + fn poll_next( + self: Pin<&mut Self>, + cx: &mut Context, + ) -> Poll> { + let self_inner = self.get_mut(); + match self_inner.inner.try_poll_next_unpin(cx) { + Poll::Ready(Some(Ok(event))) => { + Poll::Ready(Some(Ok((self_inner.id, event)))) + } + Poll::Ready(None) => unreachable!(), + Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err((self_inner.id, e)))), + Poll::Pending => Poll::Pending, } } } @@ -154,11 +175,12 @@ pub struct Isolate { needs_init: bool, shared: SharedQueue, pending_ops: FuturesUnordered, - pending_dyn_imports: FuturesUnordered>, + pending_dyn_imports: FuturesUnordered>>, have_unpolled_ops: bool, startup_script: Option, op_registry: OpRegistry, eager_poll_count: u32, + waker: AtomicWaker, } unsafe impl Send for Isolate {} @@ -225,6 +247,7 @@ impl Isolate { startup_script, op_registry: OpRegistry::new(), eager_poll_count: 0, + waker: AtomicWaker::new(), } } @@ -296,8 +319,10 @@ impl Isolate { if let Some(ref f) = isolate.dyn_import { let inner = f(id, specifier, referrer); let stream = DynImport { inner, id }; - task::current().notify(); - isolate.pending_dyn_imports.push(stream.into_future()); + isolate.waker.wake(); + isolate + .pending_dyn_imports + .push(stream.into_stream().into_future()); } else { panic!("dyn_import callback not set") } @@ -334,10 +359,11 @@ impl Isolate { // which case they can be turned into a sync op before we return to V8. This // can save a boundary crossing. 
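The dispatch below probes each newly created op once with a no-op waker; ops that are already complete are returned synchronously and never cross the async boundary. A small sketch of that probe, independent of the isolate code (try_resolve_now is an illustrative name):

    use futures::future::FutureExt;
    use futures::task::noop_waker_ref;
    use std::future::Future;
    use std::task::{Context, Poll};

    // Poll once with a waker that does nothing; if the future is already done,
    // hand back its value, otherwise return the future so it can be queued.
    fn try_resolve_now<F: Future + Unpin>(mut fut: F) -> Result<F::Output, F> {
      let mut cx = Context::from_waker(noop_waker_ref());
      match fut.poll_unpin(&mut cx) {
        Poll::Ready(value) => Ok(value),
        Poll::Pending => Err(fut),
      }
    }
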
#[allow(clippy::match_wild_err_arm)] - match fut.poll() { - Err(_) => panic!("unexpected op error"), - Ok(Ready(buf)) => Op::Sync(buf), - Ok(NotReady) => Op::Async(fut), + let mut cx = Context::from_waker(futures::task::noop_waker_ref()); + match fut.poll_unpin(&mut cx) { + Poll::Ready(Err(_)) => panic!("unexpected op error"), + Poll::Ready(Ok(buf)) => Op::Sync(buf), + Poll::Pending => Op::Async(fut), } } Op::Sync(buf) => Op::Sync(buf), @@ -359,8 +385,8 @@ impl Isolate { .expect("unexpected error"); } Op::Async(fut) => { - let fut2 = fut.map(move |buf| (op_id, buf)); - isolate.pending_ops.push(Box::new(fut2)); + let fut2 = fut.map_ok(move |buf| (op_id, buf)); + isolate.pending_ops.push(fut2.boxed()); isolate.have_unpolled_ops = true; } } @@ -522,42 +548,45 @@ impl Isolate { self.check_last_exception() } - fn poll_dyn_imports(&mut self) -> Poll<(), ErrBox> { + fn poll_dyn_imports(&mut self, cx: &mut Context) -> Poll> { use RecursiveLoadEvent::*; loop { - match self.pending_dyn_imports.poll() { - Ok(NotReady) | Ok(Ready(None)) => { + match self.pending_dyn_imports.poll_next_unpin(cx) { + Poll::Pending | Poll::Ready(None) => { // There are no active dynamic import loaders, or none are ready. - return Ok(futures::Async::Ready(())); + return Poll::Ready(Ok(())); } - Ok(Ready(Some(( - Some((dyn_import_id, Fetch(source_code_info))), + Poll::Ready(Some(( + Some(Ok((dyn_import_id, Fetch(source_code_info)))), mut stream, - )))) => { + ))) => { // A module (not necessarily the one dynamically imported) has been // fetched. Create and register it, and if successful, poll for the // next recursive-load event related to this dynamic import. - match stream.register(source_code_info, self) { + match stream.get_mut().register(source_code_info, self) { Ok(()) => self.pending_dyn_imports.push(stream.into_future()), Err(err) => { self.dyn_import_done(dyn_import_id, Err(Some(err.to_string())))? } } } - Ok(Ready(Some((Some((dyn_import_id, Instantiate(module_id))), _)))) => { + Poll::Ready(Some(( + Some(Ok((dyn_import_id, Instantiate(module_id)))), + _, + ))) => { // The top-level module from a dynamic import has been instantiated. match self.mod_evaluate(module_id) { Ok(()) => self.dyn_import_done(dyn_import_id, Ok(module_id))?, Err(..) => self.dyn_import_done(dyn_import_id, Err(None))?, } } - Err(((dyn_import_id, err), _)) => { + Poll::Ready(Some((Some(Err((dyn_import_id, err))), _))) => { // A non-javascript error occurred; this could be due to a an invalid // module specifier, or a problem with the source map, or a failure // to fetch the module source code. self.dyn_import_done(dyn_import_id, Err(Some(err.to_string())))? } - Ok(Ready(Some((None, _)))) => unreachable!(), + Poll::Ready(Some((None, _))) => unreachable!(), } } } @@ -654,30 +683,34 @@ impl Drop for LockerScope { } impl Future for Isolate { - type Item = (); - type Error = ErrBox; + type Output = Result<(), ErrBox>; - fn poll(&mut self) -> Poll<(), ErrBox> { - self.shared_init(); + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let inner = self.get_mut(); + + inner.waker.register(cx.waker()); + + inner.shared_init(); let mut overflow_response: Option<(OpId, Buf)> = None; loop { // If there are any pending dyn_import futures, do those first. - if !self.pending_dyn_imports.is_empty() { - self.poll_dyn_imports()?; + if !inner.pending_dyn_imports.is_empty() { + let poll_imports = inner.poll_dyn_imports(cx)?; + assert!(poll_imports.is_ready()); } // Now handle actual ops. 
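The waker field registered at the top of poll above replaces futures 0.1's task::current().notify(). A minimal sketch of that AtomicWaker handshake, separate from the patch and with made-up Flag/WaitForFlag names:

use futures::task::AtomicWaker;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};

// Shared state: a flag plus the waker of whichever task last polled us.
struct Flag {
  ready: AtomicBool,
  waker: AtomicWaker,
}

struct WaitForFlag(Arc<Flag>);

impl Future for WaitForFlag {
  type Output = ();
  fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> {
    // Register before checking, so a wake between the check and the return
    // cannot be lost.
    self.0.waker.register(cx.waker());
    if self.0.ready.load(Ordering::SeqCst) {
      Poll::Ready(())
    } else {
      Poll::Pending
    }
  }
}

impl Flag {
  fn set(&self) {
    self.ready.store(true, Ordering::SeqCst);
    self.waker.wake(); // reschedule the task that registered last
  }
}

fn main() {
  let flag = Arc::new(Flag {
    ready: AtomicBool::new(false),
    waker: AtomicWaker::new(),
  });
  let setter = flag.clone();
  std::thread::spawn(move || setter.set());
  futures::executor::block_on(WaitForFlag(flag)); // returns once set() ran
}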
- self.have_unpolled_ops = false; - self.eager_poll_count = 0; + inner.have_unpolled_ops = false; + inner.eager_poll_count = 0; #[allow(clippy::match_wild_err_arm)] - match self.pending_ops.poll() { - Err(_) => panic!("unexpected op error"), - Ok(Ready(None)) => break, - Ok(NotReady) => break, - Ok(Ready(Some((op_id, buf)))) => { - let successful_push = self.shared.push(op_id, &buf); + match inner.pending_ops.poll_next_unpin(cx) { + Poll::Ready(Some(Err(_))) => panic!("unexpected op error"), + Poll::Ready(None) => break, + Poll::Pending => break, + Poll::Ready(Some(Ok((op_id, buf)))) => { + let successful_push = inner.shared.push(op_id, &buf); if !successful_push { // If we couldn't push the response to the shared queue, because // there wasn't enough size, we will return the buffer via the @@ -689,34 +722,34 @@ impl Future for Isolate { } } - if self.shared.size() > 0 { + if inner.shared.size() > 0 { // Lock the current thread for V8. - let locker = LockerScope::new(self.libdeno_isolate); - self.respond(None)?; + let locker = LockerScope::new(inner.libdeno_isolate); + inner.respond(None)?; // The other side should have shifted off all the messages. - assert_eq!(self.shared.size(), 0); + assert_eq!(inner.shared.size(), 0); drop(locker); } if overflow_response.is_some() { // Lock the current thread for V8. - let locker = LockerScope::new(self.libdeno_isolate); + let locker = LockerScope::new(inner.libdeno_isolate); let (op_id, buf) = overflow_response.take().unwrap(); - self.respond(Some((op_id, &buf)))?; + inner.respond(Some((op_id, &buf)))?; drop(locker); } - self.check_promise_errors(); - self.check_last_exception()?; + inner.check_promise_errors(); + inner.check_last_exception()?; // We're idle if pending_ops is empty. - if self.pending_ops.is_empty() && self.pending_dyn_imports.is_empty() { - Ok(futures::Async::Ready(())) + if inner.pending_ops.is_empty() && inner.pending_dyn_imports.is_empty() { + Poll::Ready(Ok(())) } else { - if self.have_unpolled_ops { - task::current().notify(); + if inner.have_unpolled_ops { + inner.waker.wake(); } - Ok(futures::Async::NotReady) + Poll::Pending } } } @@ -752,33 +785,29 @@ pub fn js_check(r: Result) -> T { #[cfg(test)] pub mod tests { use super::*; - use futures::executor::spawn; + use futures::executor::ThreadPool; use futures::future::lazy; - use futures::future::ok; - use futures::Async; use std::io; use std::ops::FnOnce; use std::sync::atomic::{AtomicUsize, Ordering}; - pub fn run_in_task(f: F) -> R + pub fn run_in_task(f: F) where - F: FnOnce() -> R, + F: FnOnce(&mut Context) + Send + 'static, { - spawn(lazy(move || ok::(f()))).wait_future().unwrap() + let poll = ThreadPool::new().unwrap(); + poll.spawn_ok(lazy(move |cx| f(cx))); } - fn poll_until_ready( - future: &mut F, - max_poll_count: usize, - ) -> Result + fn poll_until_ready(future: &mut F, max_poll_count: usize) -> F::Output where - F: Future, + F: Future + Unpin, { + let mut cx = Context::from_waker(futures::task::noop_waker_ref()); for _ in 0..max_poll_count { - match future.poll() { - Ok(NotReady) => continue, - Ok(Ready(val)) => return Ok(val), - Err(err) => return Err(err), + match future.poll_unpin(&mut cx) { + Poll::Pending => continue, + Poll::Ready(val) => return val, } } panic!( @@ -799,16 +828,16 @@ pub mod tests { } impl Future for DelayedFuture { - type Item = Box<[u8]>; - type Error = (); + type Output = Result, ()>; - fn poll(&mut self) -> Poll { - if self.counter > 0 { - return Ok(Async::Ready(self.buf.clone())); + fn poll(self: Pin<&mut Self>, _cx: &mut Context) -> 
Poll { + let inner = self.get_mut(); + if inner.counter > 0 { + return Poll::Ready(Ok(inner.buf.clone())); } - self.counter += 1; - Ok(Async::NotReady) + inner.counter += 1; + Poll::Pending } } @@ -835,13 +864,13 @@ pub mod tests { assert_eq!(control.len(), 1); assert_eq!(control[0], 42); let buf = vec![43u8, 0, 0, 0].into_boxed_slice(); - Op::Async(Box::new(futures::future::ok(buf))) + Op::Async(futures::future::ok(buf).boxed()) } Mode::AsyncDelayed => { assert_eq!(control.len(), 1); assert_eq!(control[0], 42); let buf = vec![43u8, 0, 0, 0].into_boxed_slice(); - Op::Async(Box::new(DelayedFuture::new(buf))) + Op::Async(DelayedFuture::new(buf).boxed()) } Mode::OverflowReqSync => { assert_eq!(control.len(), 100 * 1024 * 1024); @@ -860,7 +889,7 @@ pub mod tests { Mode::OverflowReqAsync => { assert_eq!(control.len(), 100 * 1024 * 1024); let buf = vec![43u8, 0, 0, 0].into_boxed_slice(); - Op::Async(Box::new(DelayedFuture::new(buf))) + Op::Async(DelayedFuture::new(buf).boxed()) } Mode::OverflowResAsync => { assert_eq!(control.len(), 1); @@ -869,7 +898,7 @@ pub mod tests { vec.resize(100 * 1024 * 1024, 0); vec[0] = 4; let buf = vec.into_boxed_slice(); - Op::Async(Box::new(DelayedFuture::new(buf))) + Op::Async(DelayedFuture::new(buf).boxed()) } } }; @@ -957,7 +986,7 @@ pub mod tests { #[test] fn test_poll_async_immediate_ops() { - run_in_task(|| { + run_in_task(|cx| { let (mut isolate, dispatch_count) = setup(Mode::AsyncImmediate); js_check(isolate.execute( @@ -992,16 +1021,22 @@ pub mod tests { )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 2); assert_eq!(dispatch_count.load(Ordering::Relaxed), 2); - assert_eq!(Async::Ready(()), isolate.poll().unwrap()); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Ok(_)) => true, + _ => false, + }); js_check(isolate.execute("check3.js", "assert(nrecv == 0)")); // We are idle, so the next poll should be the last. - assert_eq!(Async::Ready(()), isolate.poll().unwrap()); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Ok(_)) => true, + _ => false, + }); }); } #[test] fn test_poll_async_delayed_ops() { - run_in_task(|| { + run_in_task(|cx| { let (mut isolate, dispatch_count) = setup(Mode::AsyncDelayed); js_check(isolate.execute( @@ -1024,7 +1059,10 @@ pub mod tests { "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 1); - assert_eq!(Async::Ready(()), isolate.poll().unwrap()); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Ok(_)) => true, + _ => false, + }); assert_eq!(dispatch_count.load(Ordering::Relaxed), 1); js_check(isolate.execute( "check2.js", @@ -1035,26 +1073,36 @@ pub mod tests { "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 2); - assert_eq!(Async::Ready(()), isolate.poll().unwrap()); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Ok(_)) => true, + _ => false, + }); js_check(isolate.execute("check3.js", "assert(nrecv == 2)")); assert_eq!(dispatch_count.load(Ordering::Relaxed), 2); // We are idle, so the next poll should be the last. - assert_eq!(Async::Ready(()), isolate.poll().unwrap()); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Ok(_)) => true, + _ => false, + }); }); } struct MockImportStream(Vec>); impl Stream for MockImportStream { - type Item = RecursiveLoadEvent; - type Error = ErrBox; - fn poll(&mut self) -> Poll, Self::Error> { - let event = if self.0.is_empty() { + type Item = Result; + + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context, + ) -> Poll> { + let inner = self.get_mut(); + let event = if inner.0.is_empty() { None } else { - Some(self.0.remove(0)?) 
+ Some(inner.0.remove(0)) }; - Ok(Ready(event)) + Poll::Ready(event) } } @@ -1080,7 +1128,7 @@ pub mod tests { #[test] fn dyn_import_err() { // Test an erroneous dynamic import where the specified module isn't found. - run_in_task(|| { + run_in_task(|cx| { let count = Arc::new(AtomicUsize::new(0)); let count_ = count.clone(); let mut isolate = Isolate::new(StartupData::None, false); @@ -1103,8 +1151,10 @@ pub mod tests { assert_eq!(count.load(Ordering::Relaxed), 1); // We should get an error here. - let result = isolate.poll(); - assert!(result.is_err()); + let result = isolate.poll_unpin(cx); + if let Poll::Ready(Ok(_)) = result { + unreachable!(); + } }) } @@ -1113,7 +1163,7 @@ pub mod tests { use std::convert::TryInto; // Import multiple modules to demonstrate that after failed dynamic import // another dynamic import can still be run - run_in_task(|| { + run_in_task(|cx| { let count = Arc::new(AtomicUsize::new(0)); let count_ = count.clone(); let mut isolate = Isolate::new(StartupData::None, false); @@ -1156,15 +1206,24 @@ pub mod tests { assert_eq!(count.load(Ordering::Relaxed), 3); // Now each poll should return error - assert!(isolate.poll().is_err()); - assert!(isolate.poll().is_err()); - assert!(isolate.poll().is_err()); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Err(_)) => true, + _ => false, + }); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Err(_)) => true, + _ => false, + }); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Err(_)) => true, + _ => false, + }); }) } #[test] fn dyn_import_ok() { - run_in_task(|| { + run_in_task(|cx| { let count = Arc::new(AtomicUsize::new(0)); let count_ = count.clone(); @@ -1224,9 +1283,15 @@ pub mod tests { )); assert_eq!(count.load(Ordering::Relaxed), 1); - assert_eq!(Ready(()), isolate.poll().unwrap()); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Ok(_)) => true, + _ => false, + }); assert_eq!(count.load(Ordering::Relaxed), 2); - assert_eq!(Ready(()), isolate.poll().unwrap()); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Ok(_)) => true, + _ => false, + }); assert_eq!(count.load(Ordering::Relaxed), 2); }) } @@ -1247,7 +1312,7 @@ pub mod tests { shared.terminate_execution(); // allow shutdown - std::thread::sleep(std::time::Duration::from_millis(100)); + std::thread::sleep(std::time::Duration::from_millis(200)); // unless reported otherwise the test should fail after this point tx_clone.send(false).ok(); @@ -1345,7 +1410,7 @@ pub mod tests { #[test] fn overflow_req_async() { - run_in_task(|| { + run_in_task(|cx| { let (mut isolate, dispatch_count) = setup(Mode::OverflowReqAsync); js_check(isolate.execute( "overflow_req_async.js", @@ -1366,14 +1431,17 @@ pub mod tests { "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 1); - assert_eq!(Async::Ready(()), js_check(isolate.poll())); + assert!(match isolate.poll_unpin(cx) { + Poll::Ready(Ok(_)) => true, + _ => false, + }); js_check(isolate.execute("check.js", "assert(asyncRecv == 1);")); }); } #[test] fn overflow_res_async() { - run_in_task(|| { + run_in_task(|_cx| { // TODO(ry) This test is quite slow due to memcpy-ing 100MB into JS. We // should optimize this. let (mut isolate, dispatch_count) = setup(Mode::OverflowResAsync); @@ -1404,7 +1472,7 @@ pub mod tests { fn overflow_res_multiple_dispatch_async() { // TODO(ry) This test is quite slow due to memcpy-ing 100MB into JS. We // should optimize this. 
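For reference, a DelayedFuture-style helper (ready only on its second poll, as used by these async-op tests) can be written against std::future roughly as below. This sketch is not from the patch and, unlike the test helper, wakes itself, so it also finishes under an ordinary executor:

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

struct YieldOnce {
  polled: bool,
}

impl Future for YieldOnce {
  type Output = u32;
  fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<u32> {
    let this = self.get_mut(); // YieldOnce is Unpin, so get_mut is fine
    if this.polled {
      Poll::Ready(42)
    } else {
      this.polled = true;
      cx.waker().wake_by_ref(); // ask to be polled again
      Poll::Pending
    }
  }
}

fn main() {
  let value = futures::executor::block_on(YieldOnce { polled: false });
  assert_eq!(value, 42);
}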
- run_in_task(|| { + run_in_task(|_cx| { let (mut isolate, dispatch_count) = setup(Mode::OverflowResAsync); js_check(isolate.execute( "overflow_res_multiple_dispatch_async.js", @@ -1434,7 +1502,7 @@ pub mod tests { #[test] fn test_pre_dispatch() { - run_in_task(|| { + run_in_task(|mut cx| { let (mut isolate, _dispatch_count) = setup(Mode::OverflowResAsync); js_check(isolate.execute( "bad_op_id.js", @@ -1448,13 +1516,15 @@ pub mod tests { assert(thrown == "Unknown op id: 100"); "#, )); - assert_eq!(Async::Ready(()), isolate.poll().unwrap()); + if let Poll::Ready(Err(_)) = isolate.poll_unpin(&mut cx) { + unreachable!(); + } }); } #[test] fn test_js() { - run_in_task(|| { + run_in_task(|mut cx| { let (mut isolate, _dispatch_count) = setup(Mode::AsyncImmediate); js_check( isolate.execute( @@ -1462,7 +1532,9 @@ pub mod tests { include_str!("shared_queue_test.js"), ), ); - assert_eq!(Async::Ready(()), isolate.poll().unwrap()); + if let Poll::Ready(Err(_)) = isolate.poll_unpin(&mut cx) { + unreachable!(); + } }); } diff --git a/core/modules.rs b/core/modules.rs index 85de79cca59de3..9f3434a4f77bb8 100644 --- a/core/modules.rs +++ b/core/modules.rs @@ -14,21 +14,22 @@ use crate::isolate::SourceCodeInfo; use crate::libdeno::deno_dyn_import_id; use crate::libdeno::deno_mod; use crate::module_specifier::ModuleSpecifier; -use futures::future::loop_fn; -use futures::future::Loop; +use futures::future::FutureExt; use futures::stream::FuturesUnordered; use futures::stream::Stream; -use futures::Async::*; -use futures::Future; -use futures::Poll; +use futures::stream::TryStreamExt; use std::collections::HashMap; use std::collections::HashSet; use std::fmt; +use std::future::Future; +use std::pin::Pin; use std::sync::Arc; use std::sync::Mutex; +use std::task::Context; +use std::task::Poll; pub type SourceCodeInfoFuture = - dyn Future + Send; + dyn Future> + Send; pub trait Loader: Send + Sync { /// Returns an absolute URL. @@ -47,7 +48,7 @@ pub trait Loader: Send + Sync { fn load( &self, module_specifier: &ModuleSpecifier, - ) -> Box; + ) -> Pin>; } #[derive(Debug, Eq, PartialEq)] @@ -68,16 +69,16 @@ enum State { /// This future is used to implement parallel async module loading without /// complicating the Isolate API. /// TODO: RecursiveLoad desperately needs to be merged with Modules. -pub struct RecursiveLoad { +pub struct RecursiveLoad { kind: Kind, state: State, loader: L, modules: Arc>, - pending: FuturesUnordered>, + pending: FuturesUnordered>>, is_pending: HashSet, } -impl RecursiveLoad { +impl RecursiveLoad { /// Starts a new parallel load of the given URL of the main module. pub fn main( specifier: &str, @@ -153,7 +154,7 @@ impl RecursiveLoad { // integrated into one thing. 
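Under the new Loader::load signature shown above, an implementation hands back a pinned, boxed, Send future; the simplest route is an async block plus FutureExt::boxed. A self-contained sketch with stand-in types (ToyLoader and FetchResult are illustrative names, not the real Loader/SourceCodeInfo):

use futures::future::FutureExt;
use std::future::Future;
use std::pin::Pin;

// Stand-ins for SourceCodeInfo / ErrBox; only the signature shape matters.
type FetchResult = Result<String, std::io::Error>;
type FetchFuture = Pin<Box<dyn Future<Output = FetchResult> + Send>>;

trait ToyLoader {
  fn load(&self, url: &str) -> FetchFuture;
}

struct InMemoryLoader;

impl ToyLoader for InMemoryLoader {
  fn load(&self, url: &str) -> FetchFuture {
    let url = url.to_string();
    // An async block plus .boxed() produces the pinned, boxed future.
    async move { Ok::<_, std::io::Error>(format!("// source for {}", url)) }
      .boxed()
  }
}

fn main() {
  let loader = InMemoryLoader;
  let src = futures::executor::block_on(loader.load("file:///a.js")).unwrap();
  assert!(src.contains("file:///a.js"));
}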
self .pending - .push(Box::new(self.loader.load(&module_specifier))); + .push(self.loader.load(&module_specifier).boxed()); self.state = State::LoadingRoot; Ok(()) @@ -182,7 +183,7 @@ impl RecursiveLoad { { self .pending - .push(Box::new(self.loader.load(&module_specifier))); + .push(self.loader.load(&module_specifier).boxed()); self.is_pending.insert(module_specifier); } @@ -194,26 +195,24 @@ impl RecursiveLoad { pub fn get_future( self, isolate: Arc>, - ) -> impl Future { - loop_fn(self, move |load| { - let isolate = isolate.clone(); - load.into_future().map_err(|(e, _)| e).and_then( - move |(event, mut load)| { - Ok(match event.unwrap() { - Event::Fetch(info) => { - let mut isolate = isolate.lock().unwrap(); - load.register(info, &mut isolate)?; - Loop::Continue(load) - } - Event::Instantiate(id) => Loop::Break(id), - }) - }, - ) - }) + ) -> impl Future> { + async move { + let mut load = self; + loop { + let event = load.try_next().await?; + match event.unwrap() { + Event::Fetch(info) => { + let mut isolate = isolate.lock().unwrap(); + load.register(info, &mut isolate)?; + } + Event::Instantiate(id) => return Ok(id), + } + } + } } } -impl ImportStream for RecursiveLoad { +impl ImportStream for RecursiveLoad { // TODO: this should not be part of RecursiveLoad. fn register( &mut self, @@ -308,40 +307,45 @@ impl ImportStream for RecursiveLoad { } } -impl Stream for RecursiveLoad { - type Item = Event; - type Error = ErrBox; +impl Stream for RecursiveLoad { + type Item = Result; - fn poll(&mut self) -> Poll, Self::Error> { - Ok(match self.state { + fn poll_next( + self: Pin<&mut Self>, + cx: &mut Context, + ) -> Poll> { + let inner = self.get_mut(); + match inner.state { State::ResolveMain(ref specifier, Some(ref code)) => { - let module_specifier = self.loader.resolve( + let module_specifier = inner.loader.resolve( specifier, ".", true, - self.dyn_import_id().is_some(), + inner.dyn_import_id().is_some(), )?; let info = SourceCodeInfo { code: code.to_owned(), module_url_specified: module_specifier.to_string(), module_url_found: module_specifier.to_string(), }; - self.state = State::LoadingRoot; - Ready(Some(Event::Fetch(info))) + inner.state = State::LoadingRoot; + Poll::Ready(Some(Ok(Event::Fetch(info)))) } State::ResolveMain(..) | State::ResolveImport(..) => { - self.add_root()?; - self.poll()? + if let Err(e) = inner.add_root() { + return Poll::Ready(Some(Err(e))); + } + inner.try_poll_next_unpin(cx) } State::LoadingRoot | State::LoadingImports(..) => { - match self.pending.poll()? { - Ready(None) => unreachable!(), - Ready(Some(info)) => Ready(Some(Event::Fetch(info))), - NotReady => NotReady, + match inner.pending.try_poll_next_unpin(cx)? 
{ + Poll::Ready(None) => unreachable!(), + Poll::Ready(Some(info)) => Poll::Ready(Some(Ok(Event::Fetch(info)))), + Poll::Pending => Poll::Pending, } } - State::Instantiated(id) => Ready(Some(Event::Instantiate(id))), - }) + State::Instantiated(id) => Poll::Ready(Some(Ok(Event::Instantiate(id)))), + } } } @@ -603,9 +607,11 @@ mod tests { use super::*; use crate::isolate::js_check; use crate::isolate::tests::*; - use futures::Async; + use futures::future::FutureExt; + use futures::stream::StreamExt; use std::error::Error; use std::fmt; + use std::future::Future; struct MockLoader { pub loads: Arc>>, @@ -676,27 +682,27 @@ mod tests { } impl Future for DelayedSourceCodeFuture { - type Item = SourceCodeInfo; - type Error = ErrBox; + type Output = Result; - fn poll(&mut self) -> Poll { - self.counter += 1; - if self.url == "file:///never_ready.js" { - return Ok(Async::NotReady); + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let inner = self.get_mut(); + inner.counter += 1; + if inner.url == "file:///never_ready.js" { + return Poll::Pending; } - if self.url == "file:///slow.js" && self.counter < 2 { + if inner.url == "file:///slow.js" && inner.counter < 2 { // TODO(ry) Hopefully in the future we can remove current task // notification. See comment above run_in_task. - futures::task::current().notify(); - return Ok(Async::NotReady); + cx.waker().wake_by_ref(); + return Poll::Pending; } - match mock_source_code(&self.url) { - Some(src) => Ok(Async::Ready(SourceCodeInfo { + match mock_source_code(&inner.url) { + Some(src) => Poll::Ready(Ok(SourceCodeInfo { code: src.0.to_owned(), - module_url_specified: self.url.clone(), + module_url_specified: inner.url.clone(), module_url_found: src.1.to_owned(), })), - None => Err(MockError::LoadErr.into()), + None => Poll::Ready(Err(MockError::LoadErr.into())), } } } @@ -733,11 +739,11 @@ mod tests { fn load( &self, module_specifier: &ModuleSpecifier, - ) -> Box { + ) -> Pin> { let mut loads = self.loads.lock().unwrap(); loads.push(module_specifier.to_string()); let url = module_specifier.to_string(); - Box::new(DelayedSourceCodeFuture { url, counter: 0 }) + DelayedSourceCodeFuture { url, counter: 0 }.boxed() } } @@ -780,7 +786,7 @@ mod tests { #[test] fn test_recursive_load() { - run_in_task(|| { + run_in_task(|mut cx| { let loader = MockLoader::new(); let modules = loader.modules.clone(); let modules_ = modules.clone(); @@ -791,12 +797,12 @@ mod tests { RecursiveLoad::main("/a.js", None, loader, modules); let a_id = loop { - match recursive_load.poll() { - Ok(Ready(Some(Event::Fetch(info)))) => { + match recursive_load.try_poll_next_unpin(&mut cx) { + Poll::Ready(Some(Ok(Event::Fetch(info)))) => { let mut isolate = isolate.lock().unwrap(); recursive_load.register(info, &mut isolate).unwrap(); } - Ok(Ready(Some(Event::Instantiate(id)))) => break id, + Poll::Ready(Some(Ok(Event::Instantiate(id)))) => break id, _ => panic!("unexpected result"), }; }; @@ -859,7 +865,7 @@ mod tests { #[test] fn test_circular_load() { - run_in_task(|| { + run_in_task(|mut cx| { let loader = MockLoader::new(); let isolate = loader.isolate.clone(); let isolate_ = isolate.clone(); @@ -868,9 +874,10 @@ mod tests { let loads = loader.loads.clone(); let recursive_load = RecursiveLoad::main("/circular1.js", None, loader, modules); - let result = recursive_load.get_future(isolate.clone()).poll(); - assert!(result.is_ok()); - if let Async::Ready(circular1_id) = result.ok().unwrap() { + let mut load_fut = recursive_load.get_future(isolate.clone()).boxed(); + let result = 
Pin::new(&mut load_fut).poll(&mut cx); + assert!(result.is_ready()); + if let Poll::Ready(Ok(circular1_id)) = result { let mut isolate = isolate_.lock().unwrap(); js_check(isolate.mod_evaluate(circular1_id)); @@ -930,7 +937,7 @@ mod tests { #[test] fn test_redirect_load() { - run_in_task(|| { + run_in_task(|mut cx| { let loader = MockLoader::new(); let isolate = loader.isolate.clone(); let isolate_ = isolate.clone(); @@ -939,10 +946,11 @@ mod tests { let loads = loader.loads.clone(); let recursive_load = RecursiveLoad::main("/redirect1.js", None, loader, modules); - let result = recursive_load.get_future(isolate.clone()).poll(); + let mut load_fut = recursive_load.get_future(isolate.clone()).boxed(); + let result = Pin::new(&mut load_fut).poll(&mut cx); println!(">> result {:?}", result); - assert!(result.is_ok()); - if let Async::Ready(redirect1_id) = result.ok().unwrap() { + assert!(result.is_ready()); + if let Poll::Ready(Ok(redirect1_id)) = result { let mut isolate = isolate_.lock().unwrap(); js_check(isolate.mod_evaluate(redirect1_id)); let l = loads.lock().unwrap(); @@ -995,18 +1003,18 @@ mod tests { #[test] fn slow_never_ready_modules() { - run_in_task(|| { + run_in_task(|mut cx| { let loader = MockLoader::new(); let isolate = loader.isolate.clone(); let modules = loader.modules.clone(); let loads = loader.loads.clone(); let mut recursive_load = RecursiveLoad::main("/main.js", None, loader, modules) - .get_future(isolate); + .get_future(isolate) + .boxed(); - let result = recursive_load.poll(); - assert!(result.is_ok()); - assert!(result.ok().unwrap().is_not_ready()); + let result = recursive_load.poll_unpin(&mut cx); + assert!(result.is_pending()); // TODO(ry) Arguably the first time we poll only the following modules // should be loaded: @@ -1018,9 +1026,8 @@ mod tests { // run_in_task. 
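The get_future rewrite polled by these tests follows the usual futures 0.1 loop_fn to async/await translation: the loop state becomes ordinary local variables, the stream is drained with try_next().await?, and Loop::Break becomes return. A stand-alone sketch of that shape, with made-up Event/drive names:

use futures::stream::{self, TryStreamExt};

enum Event {
  Fetch(&'static str),
  Done(u32),
}

async fn drive(events: Vec<Result<Event, String>>) -> Result<u32, String> {
  let mut stream = stream::iter(events);
  loop {
    match stream.try_next().await? {
      Some(Event::Fetch(url)) => println!("fetched {}", url), // keep looping
      Some(Event::Done(id)) => return Ok(id), // the old Loop::Break(id)
      None => return Err("stream ended early".to_string()),
    }
  }
}

fn main() {
  let id = futures::executor::block_on(drive(vec![
    Ok(Event::Fetch("file:///a.js")),
    Ok(Event::Done(7)),
  ]))
  .unwrap();
  assert_eq!(id, 7);
}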
for _ in 0..10 { - let result = recursive_load.poll(); - assert!(result.is_ok()); - assert!(result.ok().unwrap().is_not_ready()); + let result = recursive_load.poll_unpin(&mut cx); + assert!(result.is_pending()); let l = loads.lock().unwrap(); assert_eq!( l.to_vec(), @@ -1045,19 +1052,22 @@ mod tests { #[test] fn loader_disappears_after_error() { - run_in_task(|| { + run_in_task(|mut cx| { let loader = MockLoader::new(); let isolate = loader.isolate.clone(); let modules = loader.modules.clone(); let recursive_load = RecursiveLoad::main("/bad_import.js", None, loader, modules); - let result = recursive_load.get_future(isolate).poll(); - assert!(result.is_err()); - let err = result.err().unwrap(); - assert_eq!( - err.downcast_ref::().unwrap(), - &MockError::ResolveErr - ); + let mut load_fut = recursive_load.get_future(isolate).boxed(); + let result = load_fut.poll_unpin(&mut cx); + if let Poll::Ready(Err(err)) = result { + assert_eq!( + err.downcast_ref::().unwrap(), + &MockError::ResolveErr + ); + } else { + unreachable!(); + } }) } @@ -1072,7 +1082,7 @@ mod tests { #[test] fn recursive_load_main_with_code() { - run_in_task(|| { + run_in_task(|mut cx| { let loader = MockLoader::new(); let modules = loader.modules.clone(); let modules_ = modules.clone(); @@ -1090,12 +1100,12 @@ mod tests { ); let main_id = loop { - match recursive_load.poll() { - Ok(Ready(Some(Event::Fetch(info)))) => { + match recursive_load.poll_next_unpin(&mut cx) { + Poll::Ready(Some(Ok(Event::Fetch(info)))) => { let mut isolate = isolate.lock().unwrap(); recursive_load.register(info, &mut isolate).unwrap(); } - Ok(Ready(Some(Event::Instantiate(id)))) => break id, + Poll::Ready(Some(Ok(Event::Instantiate(id)))) => break id, _ => panic!("unexpected result"), }; }; diff --git a/core/ops.rs b/core/ops.rs index cce454348b01e5..3a4f51b83ece5f 100644 --- a/core/ops.rs +++ b/core/ops.rs @@ -3,13 +3,15 @@ pub use crate::libdeno::OpId; use crate::PinnedBuf; use futures::Future; use std::collections::HashMap; +use std::pin::Pin; pub type Buf = Box<[u8]>; -pub type OpAsyncFuture = Box + Send>; +pub type OpAsyncFuture = + Pin> + Send>>; pub(crate) type PendingOpFuture = - Box + Send>; + Pin> + Send>>; pub type OpResult = Result, E>; From 5224d11d58e4da35b1e9aa986b85429926463258 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Sat, 16 Nov 2019 11:56:43 +0100 Subject: [PATCH 2/9] box httpbody --- cli/ops/fetch.rs | 5 ++++- cli/ops/io.rs | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/cli/ops/fetch.rs b/cli/ops/fetch.rs index 168d6dafce0c70..60e9e6e60bacda 100644 --- a/cli/ops/fetch.rs +++ b/cli/ops/fetch.rs @@ -69,7 +69,10 @@ pub fn op_fetch( let body = HttpBody::from(res.into_body()); let mut table = state_.lock_resource_table(); - let rid = table.add("httpBody", Box::new(StreamResource::HttpBody(body))); + let rid = table.add( + "httpBody", + Box::new(StreamResource::HttpBody(Box::new(body))), + ); let json_res = json!({ "bodyRid": rid, diff --git a/cli/ops/io.rs b/cli/ops/io.rs index 197c87c7c70496..11afb1891b8e3e 100644 --- a/cli/ops/io.rs +++ b/cli/ops/io.rs @@ -85,7 +85,7 @@ pub enum StreamResource { TcpStream(tokio::net::TcpStream), ServerTlsStream(Box>), ClientTlsStream(Box>), - HttpBody(HttpBody), + HttpBody(Box), ChildStdin(tokio_process::ChildStdin), ChildStdout(tokio_process::ChildStdout), ChildStderr(tokio_process::ChildStderr), From fe81714d8600bb88941c346c2702c7ea87018ded Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Sat, 16 Nov 2019 12:25:07 +0100 
Subject: [PATCH 3/9] try fix fetch --- cli/ops/fetch.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/cli/ops/fetch.rs b/cli/ops/fetch.rs index 60e9e6e60bacda..25cf998120e828 100644 --- a/cli/ops/fetch.rs +++ b/cli/ops/fetch.rs @@ -8,7 +8,6 @@ use crate::state::ThreadSafeState; use deno::*; use futures::future::FutureExt; use futures::future::TryFutureExt; -use futures::task::SpawnExt; use http::header::HeaderName; use http::header::HeaderValue; use http::Method; @@ -84,8 +83,5 @@ pub fn op_fetch( futures::future::ok(json_res) }); - let pool = futures::executor::ThreadPool::new().unwrap(); - let handle = pool.spawn_with_handle(future).unwrap(); - - Ok(JsonOp::Async(handle.boxed())) + Ok(JsonOp::Async(future.boxed())) } From 80f0b718bb4c82de01382659df1cf5e09d5a0665 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Sat, 16 Nov 2019 13:13:36 +0100 Subject: [PATCH 4/9] lower timeout --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fec4fe329cc381..ec0b6fe198252f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,7 +6,7 @@ jobs: build: name: ${{ matrix.kind }} ${{ matrix.os }} runs-on: ${{ matrix.os }} - timeout-minutes: 60 + timeout-minutes: 25 strategy: matrix: os: [macOS-latest, windows-2019, ubuntu-16.04] From f2c089e0418646a53ca8a7ff4d1fc2e243c4c2f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Sat, 16 Nov 2019 13:41:25 +0100 Subject: [PATCH 5/9] disable [http] serveTLS test --- .github/workflows/ci.yml | 2 +- std/http/server_test.ts | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index adfcfdb4cdd197..5fc67bf5b052a7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,7 +6,7 @@ jobs: build: name: ${{ matrix.kind }} ${{ matrix.os }} runs-on: ${{ matrix.os }} - timeout-minutes: 25 + timeout-minutes: 30 strategy: matrix: os: [macOS-latest, windows-2019, ubuntu-16.04] diff --git a/std/http/server_test.ts b/std/http/server_test.ts index 525e40d6f2df55..6fc27288edf718 100644 --- a/std/http/server_test.ts +++ b/std/http/server_test.ts @@ -528,6 +528,8 @@ test({ } }); +// TODO: reenable this test +/* test({ name: "[http] serveTLS", async fn(): Promise { @@ -589,5 +591,6 @@ test({ assertEquals(await nextAfterClosing, { value: undefined, done: true }); } }); +*/ runIfMain(import.meta); From 5ba053f90c30d5df6d171f5c1d8c620b0cd4e12e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Sat, 16 Nov 2019 13:46:39 +0100 Subject: [PATCH 6/9] lint --- std/http/server_test.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/std/http/server_test.ts b/std/http/server_test.ts index 6fc27288edf718..b7e2a4fc24dddd 100644 --- a/std/http/server_test.ts +++ b/std/http/server_test.ts @@ -14,7 +14,6 @@ import { ServerRequest, writeResponse, readRequest, - serve, parseHTTPVersion } from "./server.ts"; import { delay } from "../util/async.ts"; From 1ae136bfde84a4c6c9847e38a223e5a4e2cc8c09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Sat, 16 Nov 2019 14:17:10 +0100 Subject: [PATCH 7/9] bring back old timeout --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5fc67bf5b052a7..bbdf02d57dde7c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,7 +6,7 @@ jobs: build: 
name: ${{ matrix.kind }} ${{ matrix.os }} runs-on: ${{ matrix.os }} - timeout-minutes: 30 + timeout-minutes: 60 strategy: matrix: os: [macOS-latest, windows-2019, ubuntu-16.04] From 82fbfa436eb98052be818aa1478c88ce3207965f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Sat, 16 Nov 2019 15:01:13 +0100 Subject: [PATCH 8/9] enable [http] serveTLS test --- cli/ops/tls.rs | 33 ++++++++++++++++++++------------- std/http/server_test.ts | 3 --- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/cli/ops/tls.rs b/cli/ops/tls.rs index 92c883ac189d98..484b7057cc6f5e 100644 --- a/cli/ops/tls.rs +++ b/cli/ops/tls.rs @@ -11,6 +11,8 @@ use deno::Resource; use deno::*; use futures::future::FutureExt; use futures::future::TryFutureExt; +use futures::stream::StreamExt; +use futures::stream::TryStreamExt; use std; use std::convert::From; use std::fs::File; @@ -22,9 +24,9 @@ use std::sync::Arc; use std::task::Context; use std::task::Poll; use tokio; +use tokio::net::tcp::Incoming; use tokio::net::TcpListener; use tokio::net::TcpStream; -use tokio::prelude::Async; use tokio_rustls::{rustls::ClientConfig, TlsConnector}; use tokio_rustls::{ rustls::{ @@ -99,7 +101,6 @@ pub fn op_dial_tls( let reader = &mut BufReader::new(key_file); config.root_store.add_pem_file(reader).unwrap(); } - let tls_connector = TlsConnector::from(Arc::new(config)); futures::future::ok(( tls_connector, @@ -196,7 +197,7 @@ fn load_keys(path: &str) -> Result, ErrBox> { #[allow(dead_code)] pub struct TlsListenerResource { - listener: tokio::net::TcpListener, + listener: Incoming, tls_acceptor: TlsAcceptor, waker: Option, local_addr: SocketAddr, @@ -286,7 +287,7 @@ fn op_listen_tls( let local_addr = listener.local_addr()?; let local_addr_str = local_addr.to_string(); let tls_listener_resource = TlsListenerResource { - listener, + listener: listener.incoming(), tls_acceptor, waker: None, local_addr, @@ -343,44 +344,50 @@ impl Future for AcceptTls { ErrBox::from(e) })?; - let listener = &mut listener_resource.listener; + let mut listener = + futures::compat::Compat01As03::new(&mut listener_resource.listener) + .map_err(ErrBox::from); if inner.accept_state == AcceptTlsState::Eager { // Similar to try_ready!, but also track/untrack accept task // in TcpListener resource. // In this way, when the listener is closed, the task can be // notified to error out (instead of stuck forever). 
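The Compat01As03 wrapper used above bridges tokio 0.1's Incoming, still a futures 0.1 stream, into the futures 0.3 Stream trait so it can be polled with poll_next_unpin. A small sketch of the same bridge, assuming futures = { version = "0.3", features = ["compat"] } and the 0.1 crate imported as futures01:

use futures::compat::Compat01As03;
use futures::stream::StreamExt;

fn main() {
  // A toy futures 0.1 stream standing in for tokio 0.1's Incoming.
  let legacy = futures01::stream::iter_ok::<_, ()>(vec![1u32, 2, 3]);

  // The wrapper implements the futures 0.3 Stream of Result items, so it can
  // be driven with poll_next_unpin / next().await like any other 0.3 stream.
  let mut bridged = Compat01As03::new(legacy);

  let collected: Vec<u32> = futures::executor::block_on(async {
    let mut out = Vec::new();
    while let Some(item) = bridged.next().await {
      out.push(item.unwrap()); // 0.1 stream errors surface as Err here
    }
    out
  });
  assert_eq!(collected, vec![1, 2, 3]);
}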
- match listener.poll_accept().map_err(ErrBox::from) { - Ok(Async::Ready((stream, addr))) => { + match listener.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(stream))) => { inner.accept_state = AcceptTlsState::Done; + let addr = stream.peer_addr().unwrap(); return Poll::Ready(Ok((stream, addr))); } - Ok(Async::NotReady) => { + Poll::Pending => { inner.accept_state = AcceptTlsState::Pending; return Poll::Pending; } - Err(e) => { + Poll::Ready(Some(Err(e))) => { inner.accept_state = AcceptTlsState::Done; return Poll::Ready(Err(e)); } + _ => unreachable!(), } } - match listener.poll_accept().map_err(ErrBox::from) { - Ok(Async::Ready((stream, addr))) => { + match listener.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(stream))) => { listener_resource.untrack_task(); inner.accept_state = AcceptTlsState::Done; + let addr = stream.peer_addr().unwrap(); Poll::Ready(Ok((stream, addr))) } - Ok(Async::NotReady) => { + Poll::Pending => { listener_resource.track_task(cx)?; Poll::Pending } - Err(e) => { + Poll::Ready(Some(Err(e))) => { listener_resource.untrack_task(); inner.accept_state = AcceptTlsState::Done; Poll::Ready(Err(e)) } + _ => unreachable!(), } } } diff --git a/std/http/server_test.ts b/std/http/server_test.ts index b7e2a4fc24dddd..fa859dc78512e0 100644 --- a/std/http/server_test.ts +++ b/std/http/server_test.ts @@ -527,8 +527,6 @@ test({ } }); -// TODO: reenable this test -/* test({ name: "[http] serveTLS", async fn(): Promise { @@ -590,6 +588,5 @@ test({ assertEquals(await nextAfterClosing, { value: undefined, done: true }); } }); -*/ runIfMain(import.meta); From d3984f0ccd4753751e23b41db3cf48f02bc23b23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Sat, 16 Nov 2019 15:14:11 +0100 Subject: [PATCH 9/9] fix import --- std/http/server_test.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/std/http/server_test.ts b/std/http/server_test.ts index fa859dc78512e0..b9ca14630285ef 100644 --- a/std/http/server_test.ts +++ b/std/http/server_test.ts @@ -12,6 +12,7 @@ import { assert, assertEquals, assertNotEquals } from "../testing/asserts.ts"; import { Response, ServerRequest, + serve, writeResponse, readRequest, parseHTTPVersion