diff --git a/src/Cargo.lock b/src/Cargo.lock index 3c8d1164863de..f2c7fae182d5d 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -164,6 +164,11 @@ dependencies = [ "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "byteorder" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "cargo" version = "0.21.0" @@ -686,6 +691,14 @@ name = "log" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "log_settings" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "lzma-sys" version = "0.1.7" @@ -1349,8 +1362,10 @@ dependencies = [ name = "rustc_mir" version = "0.0.0" dependencies = [ + "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc_bitflags 0.0.0", "rustc_const_eval 0.0.0", @@ -2060,6 +2075,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1370e9fc2a6ae53aea8b7a5110edbd08836ed87c88736dfabccade1c2b44bff4" "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" "checksum bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f2f382711e76b9de6c744cc00d0497baba02fb00a787f088c879f01d09468e32" +"checksum byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff81738b726f5d099632ceaffe7fb65b90212e8dce59d518729e7e8634032d3d" "checksum cargo 0.21.0 (git+https://github.com/rust-lang/cargo)" = "" "checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de" "checksum clap 2.25.0 (registry+https://github.com/rust-lang/crates.io-index)" = "867a885995b4184be051b70a592d4d70e32d7a188db6e8dff626af286a962771" @@ -2099,6 +2115,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0db4ec23611747ef772db1c4d650f8bd762f07b461727ec998f953c614024b75" "checksum libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "3fdd64ef8ee652185674455c1d450b83cbc8ad895625d543b5324d923f82e4d8" "checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" +"checksum log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3d382732ea0fbc09790c4899db3255bdea0fc78b54bf234bd18a63bb603915b6" "checksum lzma-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "013fa6506eb7d26040c46dab9ecb7ccb4e2896b5bf24a9d65932501ea9f67af8" "checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376" "checksum mdbook 0.0.22 (registry+https://github.com/rust-lang/crates.io-index)" = "22911d86cde6f80fa9f0fb2a68bbbde85d97af4fe0ce267141c83a4187d28700" diff --git a/src/librustc_mir/Cargo.toml b/src/librustc_mir/Cargo.toml index 6e42e02d5109b..32ec2669044a8 100644 --- a/src/librustc_mir/Cargo.toml 
+++ b/src/librustc_mir/Cargo.toml @@ -18,3 +18,5 @@ rustc_data_structures = { path = "../librustc_data_structures" } rustc_bitflags = { path = "../librustc_bitflags" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } +byteorder = { version = "1.1", features = ["i128"]} +log_settings = "0.1.1" diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index bb1767773327c..ef8ef8757ba83 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -30,6 +30,7 @@ Rust MIR: a lowered representation of Rust. Also: an experiment! #![cfg_attr(stage0, feature(associated_consts))] #[macro_use] extern crate log; +extern crate log_settings; extern crate graphviz as dot; #[macro_use] extern crate rustc; @@ -44,6 +45,9 @@ extern crate rustc_const_math; extern crate rustc_const_eval; extern crate core; // for NonZero +extern crate byteorder; + + pub mod diagnostics; mod build; @@ -52,6 +56,7 @@ mod hair; mod shim; pub mod transform; pub mod util; +pub mod miri; use rustc::ty::maps::Providers; diff --git a/src/librustc_mir/miri/LICENSE-APACHE b/src/librustc_mir/miri/LICENSE-APACHE new file mode 100644 index 0000000000000..a32595fa70bc1 --- /dev/null +++ b/src/librustc_mir/miri/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright 2016 The Miri Developers + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/librustc_mir/miri/LICENSE-MIT b/src/librustc_mir/miri/LICENSE-MIT new file mode 100644 index 0000000000000..1f9d89a5862b5 --- /dev/null +++ b/src/librustc_mir/miri/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 The Miri Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/src/librustc_mir/miri/README.md b/src/librustc_mir/miri/README.md new file mode 100644 index 0000000000000..3423f1d9d810d --- /dev/null +++ b/src/librustc_mir/miri/README.md @@ -0,0 +1,23 @@ +An interpreter for [Rust][rust]'s [mid-level intermediate +representation][mir] (MIR). + +## Debugging + +You can get detailed, statement-by-statement traces by setting the `RUST_LOG` +environment variable to `rustc_mir::miri=trace`. These traces are indented based on call stack +depth. You can get a much less verbose set of information with other logging +levels such as `warn`. + +## License + +Licensed under either of + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you shall be dual licensed as above, without any +additional terms or conditions. diff --git a/src/librustc_mir/miri/cast.rs b/src/librustc_mir/miri/cast.rs new file mode 100644 index 0000000000000..173bdee1ecc01 --- /dev/null +++ b/src/librustc_mir/miri/cast.rs @@ -0,0 +1,117 @@ +use rustc::ty::{self, Ty}; +use syntax::ast::{FloatTy, IntTy, UintTy}; + +use super::error::{EvalResult, EvalError}; +use super::eval_context::EvalContext; +use super::value::PrimVal; + +impl<'a, 'tcx> EvalContext<'a, 'tcx> { + pub(super) fn cast_primval( + &self, + val: PrimVal, + src_ty: Ty<'tcx>, + dest_ty: Ty<'tcx> + ) -> EvalResult<'tcx, PrimVal> { + let kind = self.ty_to_primval_kind(src_ty)?; + + use super::value::PrimValKind::*; + match kind { + F32 => self.cast_float(val.to_f32()? 
as f64, dest_ty), + F64 => self.cast_float(val.to_f64()?, dest_ty), + + I8 | I16 | I32 | I64 | I128 => { + if val.is_ptr() { + self.cast_ptr(val, dest_ty) + } else { + self.cast_signed_int(val.to_i128()?, dest_ty) + } + }, + + Bool | Char | U8 | U16 | U32 | U64 | U128 => { + if val.is_ptr() { + self.cast_ptr(val, dest_ty) + } else { + self.cast_int(val.to_u128()?, dest_ty, false) + } + }, + + FnPtr | Ptr => self.cast_ptr(val, dest_ty), + } + } + + fn cast_signed_int(&self, val: i128, ty: ty::Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + self.cast_int(val as u128, ty, val < 0) + } + + fn cast_int(&self, v: u128, ty: ty::Ty<'tcx>, negative: bool) -> EvalResult<'tcx, PrimVal> { + use rustc::ty::TypeVariants::*; + match ty.sty { + TyBool if v == 0 => Ok(PrimVal::from_bool(false)), + TyBool if v == 1 => Ok(PrimVal::from_bool(true)), + TyBool => Err(EvalError::InvalidBool), + + TyInt(IntTy::I8) => Ok(PrimVal::Bytes(v as i128 as i8 as u128)), + TyInt(IntTy::I16) => Ok(PrimVal::Bytes(v as i128 as i16 as u128)), + TyInt(IntTy::I32) => Ok(PrimVal::Bytes(v as i128 as i32 as u128)), + TyInt(IntTy::I64) => Ok(PrimVal::Bytes(v as i128 as i64 as u128)), + TyInt(IntTy::I128) => Ok(PrimVal::Bytes(v as u128)), + + TyUint(UintTy::U8) => Ok(PrimVal::Bytes(v as u8 as u128)), + TyUint(UintTy::U16) => Ok(PrimVal::Bytes(v as u16 as u128)), + TyUint(UintTy::U32) => Ok(PrimVal::Bytes(v as u32 as u128)), + TyUint(UintTy::U64) => Ok(PrimVal::Bytes(v as u64 as u128)), + TyUint(UintTy::U128) => Ok(PrimVal::Bytes(v)), + + TyInt(IntTy::Is) => { + let int_ty = self.tcx.sess.target.int_type; + let ty = self.tcx.mk_mach_int(int_ty); + self.cast_int(v, ty, negative) + } + + TyUint(UintTy::Us) => { + let uint_ty = self.tcx.sess.target.uint_type; + let ty = self.tcx.mk_mach_uint(uint_ty); + self.cast_int(v, ty, negative) + } + + TyFloat(FloatTy::F64) if negative => Ok(PrimVal::from_f64(v as i128 as f64)), + TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(v as f64)), + TyFloat(FloatTy::F32) if negative => Ok(PrimVal::from_f32(v as i128 as f32)), + TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(v as f32)), + + TyChar if v as u8 as u128 == v => Ok(PrimVal::Bytes(v)), + TyChar => Err(EvalError::InvalidChar(v)), + + // No alignment check needed for raw pointers + TyRawPtr(_) => Ok(PrimVal::Bytes(v % (1 << self.memory.pointer_size()))), + + _ => Err(EvalError::Unimplemented(format!("int to {:?} cast", ty))), + } + } + + fn cast_float(&self, val: f64, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + use rustc::ty::TypeVariants::*; + match ty.sty { + // Casting negative floats to unsigned integers yields zero. + TyUint(_) if val < 0.0 => self.cast_int(0, ty, false), + TyInt(_) if val < 0.0 => self.cast_int(val as i128 as u128, ty, true), + + TyInt(_) | ty::TyUint(_) => self.cast_int(val as u128, ty, false), + + TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(val)), + TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(val as f32)), + _ => Err(EvalError::Unimplemented(format!("float to {:?} cast", ty))), + } + } + + fn cast_ptr(&self, ptr: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + use rustc::ty::TypeVariants::*; + match ty.sty { + // Casting to a reference or fn pointer is not permitted by rustc, no need to support it here. 
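+ // For example: a pointer cast to `usize` or `isize` passes through unchanged,
+ // while a cast to a fixed-width integer like `u8` or `u64` would have to expose
+ // the abstract base address and therefore fails with `ReadPointerAsBytes`.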
+ TyRawPtr(_) | TyInt(IntTy::Is) | TyUint(UintTy::Us) => + Ok(ptr), + TyInt(_) | TyUint(_) => Err(EvalError::ReadPointerAsBytes), + _ => Err(EvalError::Unimplemented(format!("ptr to {:?} cast", ty))), + } + } +} diff --git a/src/librustc_mir/miri/const_eval.rs b/src/librustc_mir/miri/const_eval.rs new file mode 100644 index 0000000000000..05afacc567a1e --- /dev/null +++ b/src/librustc_mir/miri/const_eval.rs @@ -0,0 +1,76 @@ +use rustc::traits::Reveal; +use rustc::ty::{self, TyCtxt, Ty, Instance}; +use syntax::ast::Mutability; + +use super::error::{EvalError, EvalResult}; +use super::lvalue::{Global, GlobalId, Lvalue}; +use super::value::PrimVal; +use rustc_const_math::ConstInt; +use super::eval_context::{EvalContext, StackPopCleanup}; +use super::ResourceLimits; + +pub fn eval_body_as_primval<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + instance: Instance<'tcx>, +) -> EvalResult<'tcx, (PrimVal, Ty<'tcx>)> { + let limits = ResourceLimits::default(); + let mut ecx = EvalContext::new(tcx, limits); + let cid = GlobalId { instance, promoted: None }; + if ecx.tcx.has_attr(instance.def_id(), "linkage") { + return Err(EvalError::NotConst("extern global".to_string())); + } + + let mir = ecx.load_mir(instance.def)?; + if !ecx.globals.contains_key(&cid) { + ecx.globals.insert(cid, Global::uninitialized(mir.return_ty)); + let mutable = !mir.return_ty.is_freeze( + ecx.tcx, + ty::ParamEnv::empty(Reveal::All), + mir.span); + let mutability = if mutable { + Mutability::Mutable + } else { + Mutability::Immutable + }; + let cleanup = StackPopCleanup::MarkStatic(mutability); + let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id())); + trace!("pushing stack frame for global: {}", name); + ecx.push_stack_frame( + instance, + mir.span, + mir, + Lvalue::Global(cid), + cleanup, + )?; + + while ecx.step()? 
{} } + let value = ecx.globals.get(&cid).expect("global not cached").value; + Ok((ecx.value_to_primval(value, mir.return_ty)?, mir.return_ty)) +} + +pub fn eval_body_as_integer<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + instance: Instance<'tcx>, +) -> EvalResult<'tcx, ConstInt> { + let (prim, ty) = eval_body_as_primval(tcx, instance)?; + let prim = prim.to_bytes()?; + use syntax::ast::{IntTy, UintTy}; + use rustc::ty::TypeVariants::*; + use rustc_const_math::{ConstIsize, ConstUsize}; + Ok(match ty.sty { + TyInt(IntTy::I8) => ConstInt::I8(prim as i128 as i8), + TyInt(IntTy::I16) => ConstInt::I16(prim as i128 as i16), + TyInt(IntTy::I32) => ConstInt::I32(prim as i128 as i32), + TyInt(IntTy::I64) => ConstInt::I64(prim as i128 as i64), + TyInt(IntTy::I128) => ConstInt::I128(prim as i128), + TyInt(IntTy::Is) => ConstInt::Isize(ConstIsize::new(prim as i128 as i64, tcx.sess.target.int_type).expect("miri should already have errored")), + TyUint(UintTy::U8) => ConstInt::U8(prim as u8), + TyUint(UintTy::U16) => ConstInt::U16(prim as u16), + TyUint(UintTy::U32) => ConstInt::U32(prim as u32), + TyUint(UintTy::U64) => ConstInt::U64(prim as u64), + TyUint(UintTy::U128) => ConstInt::U128(prim), + TyUint(UintTy::Us) => ConstInt::Usize(ConstUsize::new(prim as u64, tcx.sess.target.uint_type).expect("miri should already have errored")), + _ => return Err(EvalError::NeedsRfc("evaluating anything other than isize/usize during typeck".to_string())), + }) +} diff --git a/src/librustc_mir/miri/error.rs b/src/librustc_mir/miri/error.rs new file mode 100644 index 0000000000000..de1779c69c2be --- /dev/null +++ b/src/librustc_mir/miri/error.rs @@ -0,0 +1,233 @@ +use std::error::Error; +use std::fmt; +use rustc::mir; +use rustc::ty::{FnSig, Ty, layout}; +use super::memory::{MemoryPointer, Kind}; +use rustc_const_math::ConstMathErr; +use syntax::codemap::Span; + +#[derive(Clone, Debug)] +pub enum EvalError<'tcx> { + FunctionPointerTyMismatch(FnSig<'tcx>, FnSig<'tcx>), + NoMirFor(String), + UnterminatedCString(MemoryPointer), + DanglingPointerDeref, + DoubleFree, + InvalidMemoryAccess, + InvalidFunctionPointer, + InvalidBool, + InvalidDiscriminant, + PointerOutOfBounds { + ptr: MemoryPointer, + access: bool, + allocation_size: u64, + }, + InvalidNullPointerUsage, + ReadPointerAsBytes, + ReadBytesAsPointer, + InvalidPointerMath, + ReadUndefBytes, + DeadLocal, + InvalidBoolOp(mir::BinOp), + Unimplemented(String), + DerefFunctionPointer, + ExecuteMemory, + ArrayIndexOutOfBounds(Span, u64, u64), + Math(Span, ConstMathErr), + Intrinsic(String), + OverflowingMath, + InvalidChar(u128), + OutOfMemory { + allocation_size: u64, + memory_size: u64, + memory_usage: u64, + }, + ExecutionTimeLimitReached, + StackFrameLimitReached, + OutOfTls, + TlsOutOfBounds, + AbiViolation(String), + AlignmentCheckFailed { + required: u64, + has: u64, + }, + CalledClosureAsFunction, + VtableForArgumentlessMethod, + ModifiedConstantMemory, + AssumptionNotHeld, + InlineAsm, + TypeNotPrimitive(Ty<'tcx>), + ReallocatedWrongMemoryKind(Kind, Kind), + DeallocatedWrongMemoryKind(Kind, Kind), + ReallocateNonBasePtr, + DeallocateNonBasePtr, + IncorrectAllocationInformation, + Layout(layout::LayoutError<'tcx>), + HeapAllocZeroBytes, + HeapAllocNonPowerOfTwoAlignment(u64), + Unreachable, + Panic, + NeedsRfc(String), + NotConst(String), + ReadFromReturnPointer, + PathNotFound(Vec<String>), +} + +pub type EvalResult<'tcx, T = ()> = Result<T, EvalError<'tcx>>; + +impl<'tcx> Error for EvalError<'tcx> { + fn description(&self) -> &str { + use self::EvalError::*; + match *self {
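+ // Every variant maps to a fixed message here; variants that carry data
+ // render their details in the `fmt::Display` impl further down.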
FunctionPointerTyMismatch(..) => + "tried to call a function through a function pointer of a different type", + InvalidMemoryAccess => + "tried to access memory through an invalid pointer", + DanglingPointerDeref => + "dangling pointer was dereferenced", + DoubleFree => + "tried to deallocate dangling pointer", + InvalidFunctionPointer => + "tried to use an integer pointer or a dangling pointer as a function pointer", + InvalidBool => + "invalid boolean value read", + InvalidDiscriminant => + "invalid enum discriminant value read", + PointerOutOfBounds { .. } => + "pointer offset outside bounds of allocation", + InvalidNullPointerUsage => + "invalid use of NULL pointer", + ReadPointerAsBytes => + "a raw memory access tried to access part of a pointer value as raw bytes", + ReadBytesAsPointer => + "a memory access tried to interpret some bytes as a pointer", + InvalidPointerMath => + "attempted to do invalid arithmetic on pointers that would leak base addresses, e.g. comparing pointers into different allocations", + ReadUndefBytes => + "attempted to read undefined bytes", + DeadLocal => + "tried to access a dead local variable", + InvalidBoolOp(_) => + "invalid boolean operation", + Unimplemented(ref msg) => msg, + DerefFunctionPointer => + "tried to dereference a function pointer", + ExecuteMemory => + "tried to treat a memory pointer as a function pointer", + ArrayIndexOutOfBounds(..) => + "array index out of bounds", + Math(..) => + "mathematical operation failed", + Intrinsic(..) => + "intrinsic failed", + OverflowingMath => + "attempted to do overflowing math", + NoMirFor(..) => + "mir not found", + InvalidChar(..) => + "tried to interpret an invalid 32-bit value as a char", + OutOfMemory{..} => + "could not allocate more memory", + ExecutionTimeLimitReached => + "reached the configured maximum execution time", + StackFrameLimitReached => + "reached the configured maximum number of stack frames", + OutOfTls => + "reached the maximum number of representable TLS keys", + TlsOutOfBounds => + "accessed an invalid (unallocated) TLS key", + AbiViolation(ref msg) => msg, + AlignmentCheckFailed{..} => + "tried to execute a misaligned read or write", + CalledClosureAsFunction => + "tried to call a closure through a function pointer", + VtableForArgumentlessMethod => + "tried to call a vtable function without arguments", + ModifiedConstantMemory => + "tried to modify constant memory", + AssumptionNotHeld => + "`assume` argument was false", + InlineAsm => + "miri does not support inline assembly", + TypeNotPrimitive(_) => + "expected primitive type, got nonprimitive", + ReallocatedWrongMemoryKind(_, _) => + "tried to reallocate memory from one kind to another", + DeallocatedWrongMemoryKind(_, _) => + "tried to deallocate memory of the wrong kind", + ReallocateNonBasePtr => + "tried to reallocate with a pointer not to the beginning of an existing object", + DeallocateNonBasePtr => + "tried to deallocate with a pointer not to the beginning of an existing object", + IncorrectAllocationInformation => + "tried to deallocate or reallocate using incorrect alignment or size", + Layout(_) => + "rustc layout computation failed", + UnterminatedCString(_) => + "attempted to get length of a null terminated string, but no null found before end of allocation", + HeapAllocZeroBytes => + "tried to re-, de- or allocate zero bytes on the heap", + HeapAllocNonPowerOfTwoAlignment(_) => + "tried to re-, de-, or allocate heap memory with alignment that is not a power of two", + Unreachable => + "entered unreachable 
code", + Panic => + "the evaluated program panicked", + NeedsRfc(_) => + "this feature needs an rfc before being allowed inside constants", + NotConst(_) => + "this feature is not compatible with constant evaluation", + ReadFromReturnPointer => + "tried to read from the return pointer", + EvalError::PathNotFound(_) => + "a path could not be resolved, maybe the crate is not loaded", + } + } + + fn cause(&self) -> Option<&Error> { None } +} + +impl<'tcx> fmt::Display for EvalError<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::EvalError::*; + match *self { + PointerOutOfBounds { ptr, access, allocation_size } => { + write!(f, "{} at offset {}, outside bounds of allocation {} which has size {}", + if access { "memory access" } else { "pointer computed" }, + ptr.offset, ptr.alloc_id, allocation_size) + }, + NoMirFor(ref func) => write!(f, "no mir for `{}`", func), + FunctionPointerTyMismatch(sig, got) => + write!(f, "tried to call a function with sig {} through a function pointer of type {}", sig, got), + ArrayIndexOutOfBounds(span, len, index) => + write!(f, "index out of bounds: the len is {} but the index is {} at {:?}", len, index, span), + ReallocatedWrongMemoryKind(old, new) => + write!(f, "tried to reallocate memory from {:?} to {:?}", old, new), + DeallocatedWrongMemoryKind(old, new) => + write!(f, "tried to deallocate {:?} memory but gave {:?} as the kind", old, new), + Math(span, ref err) => + write!(f, "{:?} at {:?}", err, span), + Intrinsic(ref err) => + write!(f, "{}", err), + InvalidChar(c) => + write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c), + OutOfMemory { allocation_size, memory_size, memory_usage } => + write!(f, "tried to allocate {} more bytes, but only {} bytes are free of the {} byte memory", + allocation_size, memory_size - memory_usage, memory_size), + AlignmentCheckFailed { required, has } => + write!(f, "tried to access memory with alignment {}, but alignment {} is required", + has, required), + TypeNotPrimitive(ty) => + write!(f, "expected primitive type, got {}", ty), + Layout(ref err) => + write!(f, "rustc layout computation failed: {:?}", err), + NeedsRfc(ref msg) => + write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg), + NotConst(ref msg) => + write!(f, "Cannot evaluate within constants: \"{}\"", msg), + EvalError::PathNotFound(ref path) => + write!(f, "Cannot find path {:?}", path), + _ => write!(f, "{}", self.description()), + } + } +} diff --git a/src/librustc_mir/miri/eval_context.rs b/src/librustc_mir/miri/eval_context.rs new file mode 100644 index 0000000000000..d0772dbfa7996 --- /dev/null +++ b/src/librustc_mir/miri/eval_context.rs @@ -0,0 +1,2159 @@ +use std::collections::{HashMap, HashSet}; +use std::fmt::Write; + +use rustc::hir::def_id::DefId; +use rustc::hir::map::definitions::DefPathData; +use rustc::middle::const_val::ConstVal; +use rustc::mir; +use rustc::traits::Reveal; +use rustc::ty::layout::{self, Layout, Size}; +use rustc::ty::subst::{Subst, Substs, Kind}; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Binder}; +use rustc::traits; +use rustc_data_structures::indexed_vec::Idx; +use syntax::codemap::{self, DUMMY_SP, Span}; +use syntax::ast::{self, Mutability}; +use syntax::abi::Abi; + +use super::error::{EvalError, EvalResult}; +use super::lvalue::{Global, GlobalId, Lvalue, LvalueExtra}; +use super::memory::{Memory, MemoryPointer, TlsKey, HasMemory}; +use super::memory::Kind as MemoryKind; +use super::operator; +use super::value::{PrimVal, PrimValKind, Value, 
Pointer}; + +pub struct EvalContext<'a, 'tcx: 'a> { + /// The results of the type checker, from rustc. + pub(crate) tcx: TyCtxt<'a, 'tcx, 'tcx>, + + /// The virtual memory system. + pub(crate) memory: Memory<'a, 'tcx>, + + /// Precomputed statics, constants and promoteds. + pub(crate) globals: HashMap<GlobalId<'tcx>, Global<'tcx>>, + + /// The virtual call stack. + pub(crate) stack: Vec<Frame<'tcx>>, + + /// The maximum number of stack frames allowed + pub(crate) stack_limit: usize, + + /// The maximum number of operations that may be executed. + /// This prevents infinite loops and huge computations from freezing up const eval. + /// Remove once halting problem is solved. + pub(crate) steps_remaining: u64, + + /// Environment variables set by `setenv` + /// Miri does not expose env vars from the host to the emulated program + pub(crate) env_vars: HashMap<Vec<u8>, MemoryPointer>, +} + +/// A stack frame. +pub struct Frame<'tcx> { + //////////////////////////////////////////////////////////////////////////////// + // Function and callsite information + //////////////////////////////////////////////////////////////////////////////// + + /// The MIR for the function called on this frame. + pub mir: &'tcx mir::Mir<'tcx>, + + /// The def_id and substs of the current function + pub instance: ty::Instance<'tcx>, + + /// The span of the call site. + pub span: codemap::Span, + + //////////////////////////////////////////////////////////////////////////////// + // Return lvalue and locals + //////////////////////////////////////////////////////////////////////////////// + + /// The block to return to when returning from the current stack frame + pub return_to_block: StackPopCleanup, + + /// The location where the result of the current stack frame should be written to. + pub return_lvalue: Lvalue<'tcx>, + + /// The list of locals for this stack frame, stored in order as + /// `[arguments..., variables..., temporaries...]`. The locals are stored as `Option<Value>`s. + /// `None` represents a local that is currently dead, while a live local + /// can either directly contain `PrimVal` or refer to some part of an `Allocation`. + /// + /// Before being initialized, arguments are `Value::ByVal(PrimVal::Undef)` and other locals are `None`. + pub locals: Vec<Option<Value>>, + + //////////////////////////////////////////////////////////////////////////////// + // Current position within the function + //////////////////////////////////////////////////////////////////////////////// + + /// The block that is currently executed (or will be executed after the above call stacks + /// return). + pub block: mir::BasicBlock, + + /// The index of the currently evaluated statement. + pub stmt: usize, +} + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub enum StackPopCleanup { + /// The stack frame existed to compute the initial value of a static/constant, make sure it + isn't modifiable afterwards in case of constants.
+ + /// In case of `static mut`, mark the memory to ensure it's never marked as immutable through + /// references or deallocated + MarkStatic(Mutability), + /// A regular stack frame added due to a function call will need to get forwarded to the next + /// block + Goto(mir::BasicBlock), + /// After finishing a tls destructor, find the next one instead of starting from the beginning + /// and thus just rerunning the first one until its `data` argument is null + /// + /// The index is the current tls destructor's index + Tls(Option<TlsKey>), + /// The main function and diverging functions have nowhere to return to + None, +} + +#[derive(Copy, Clone, Debug)] +pub struct ResourceLimits { + pub memory_size: u64, + pub step_limit: u64, + pub stack_limit: usize, +} + +impl Default for ResourceLimits { + fn default() -> Self { + ResourceLimits { + memory_size: 100 * 1024 * 1024, // 100 MB + step_limit: 1_000_000, + stack_limit: 100, + } + } +} + +impl<'a, 'tcx> EvalContext<'a, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, limits: ResourceLimits) -> Self { + EvalContext { + tcx, + memory: Memory::new(&tcx.data_layout, limits.memory_size), + globals: HashMap::new(), + stack: Vec::new(), + stack_limit: limits.stack_limit, + steps_remaining: limits.step_limit, + env_vars: HashMap::new(), + } + } + + pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> { + let substs = self.substs(); + self.alloc_ptr_with_substs(ty, substs) + } + + pub fn alloc_ptr_with_substs( + &mut self, + ty: Ty<'tcx>, + substs: &'tcx Substs<'tcx> + ) -> EvalResult<'tcx, MemoryPointer> { + let size = self.type_size_with_substs(ty, substs)?.expect("cannot alloc memory for unsized type"); + let align = self.type_align_with_substs(ty, substs)?; + self.memory.allocate(size, align, MemoryKind::Stack) + } + + pub fn memory(&self) -> &Memory<'a, 'tcx> { + &self.memory + } + + pub fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> { + &mut self.memory + } + + pub fn stack(&self) -> &[Frame<'tcx>] { + &self.stack + } + + /// Returns true if the current frame or any parent frame is part of a ctfe. + /// + /// Used to disable features in const eval, which do not have an RFC enabling + /// them or which can't be written in a way that they produce the same output + /// that evaluating the code at runtime would produce. + pub fn const_env(&self) -> bool { + for frame in self.stack.iter().rev() { + if let StackPopCleanup::MarkStatic(_) = frame.return_to_block { + return true; + } + } + false + } + + pub(crate) fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { + let ptr = self.memory.allocate_cached(s.as_bytes())?; + Ok(Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::from_u128(s.len() as u128))) + } + + pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> { + use rustc::middle::const_val::ConstVal::*; + use rustc_const_math::ConstFloat; + + let primval = match *const_val { + Integral(const_int) => PrimVal::Bytes(const_int.to_u128_unchecked()), + + Float(ConstFloat::F32(f)) => PrimVal::from_f32(f), + Float(ConstFloat::F64(f)) => PrimVal::from_f64(f), + + Bool(b) => PrimVal::from_bool(b), + Char(c) => PrimVal::from_char(c), + + Str(ref s) => return self.str_to_value(s), + + ByteStr(ref bs) => { + let ptr = self.memory.allocate_cached(bs)?; + PrimVal::Ptr(ptr) + } + + Variant(_) => unimplemented!(), + Struct(_) => unimplemented!(), + Tuple(_) => unimplemented!(), + // function items are zero sized and thus have no readable value + Function(..)
=> PrimVal::Undef, + Array(_) => unimplemented!(), + Repeat(_, _) => unimplemented!(), + }; + + Ok(Value::ByVal(primval)) + } + + pub(super) fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + // generics are weird, don't run this function on a generic + assert!(!ty.needs_subst()); + ty.is_sized(self.tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP) + } + + pub fn load_mir(&self, instance: ty::InstanceDef<'tcx>) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> { + trace!("load mir {:?}", instance); + match instance { + ty::InstanceDef::Item(def_id) => self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| EvalError::NoMirFor(self.tcx.item_path_str(def_id))), + _ => Ok(self.tcx.instance_mir(instance)), + } + } + + pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + // miri doesn't care about lifetimes, and will choke on some crazy ones + // let's simply get rid of them + let without_lifetimes = self.tcx.erase_regions(&ty); + let substituted = without_lifetimes.subst(self.tcx, substs); + self.tcx.normalize_associated_type(&substituted) + } + + pub fn erase_lifetimes<T>(&self, value: &Binder<T>) -> T + where T : TypeFoldable<'tcx> + { + let value = self.tcx.erase_late_bound_regions(value); + self.tcx.erase_regions(&value) + } + + pub(super) fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> { + self.type_size_with_substs(ty, self.substs()) + } + + pub(super) fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> { + self.type_align_with_substs(ty, self.substs()) + } + + fn type_size_with_substs( + &self, + ty: Ty<'tcx>, + substs: &'tcx Substs<'tcx>, + ) -> EvalResult<'tcx, Option<u64>> { + let layout = self.type_layout_with_substs(ty, substs)?; + if layout.is_unsized() { + Ok(None) + } else { + Ok(Some(layout.size(&self.tcx.data_layout).bytes())) + } + } + + fn type_align_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, u64> { + self.type_layout_with_substs(ty, substs).map(|layout| layout.align(&self.tcx.data_layout).abi()) + } + + pub(super) fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, &'tcx Layout> { + self.type_layout_with_substs(ty, self.substs()) + } + + fn type_layout_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, &'tcx Layout> { + // TODO(solson): Is this inefficient? Needs investigation. + let ty = self.monomorphize(ty, substs); + + ty.layout(self.tcx, ty::ParamEnv::empty(Reveal::All)).map_err(EvalError::Layout) + } + + pub fn push_stack_frame( + &mut self, + instance: ty::Instance<'tcx>, + span: codemap::Span, + mir: &'tcx mir::Mir<'tcx>, + return_lvalue: Lvalue<'tcx>, + return_to_block: StackPopCleanup, + ) -> EvalResult<'tcx> { + ::log_settings::settings().indentation += 1; + + /// Return the set of locals that have a storage annotation anywhere + fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet<mir::Local> { + use rustc::mir::StatementKind::*; + + let mut set = HashSet::new(); + for block in mir.basic_blocks() { + for stmt in block.statements.iter() { + match stmt.kind { + StorageLive(mir::Lvalue::Local(local)) | StorageDead(mir::Lvalue::Local(local)) => { + set.insert(local); + } + _ => {} + } + } + }; + set + } + + // Subtract 1 because `local_decls` includes the ReturnMemoryPointer, but we don't store a local + // `Value` for that.
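+ // (`mir::Local` index 0 is the return pointer, so real locals start at 1;
+ // the loop below uses `mir::Local::new(i + 1)` to account for that.)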
+ let annotated_locals = collect_storage_annotations(mir); + let num_locals = mir.local_decls.len() - 1; + let mut locals = vec![None; num_locals]; + for i in 0..num_locals { + let local = mir::Local::new(i+1); + if !annotated_locals.contains(&local) { + locals[i] = Some(Value::ByVal(PrimVal::Undef)); + } + } + + self.stack.push(Frame { + mir, + block: mir::START_BLOCK, + return_to_block, + return_lvalue, + locals, + span, + instance, + stmt: 0, + }); + + if self.stack.len() > self.stack_limit { + Err(EvalError::StackFrameLimitReached) + } else { + Ok(()) + } + } + + pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> { + ::log_settings::settings().indentation -= 1; + let frame = self.stack.pop().expect("tried to pop a stack frame, but there were none"); + match frame.return_to_block { + StackPopCleanup::MarkStatic(mutable) => if let Lvalue::Global(id) = frame.return_lvalue { + let global_value = self.globals.get_mut(&id) + .expect("global should have been cached (static)"); + match global_value.value { + // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions + Value::ByRef(ptr, _aligned) => + // Alignment does not matter for this call + self.memory.mark_static_initalized(ptr.to_ptr()?.alloc_id, mutable)?, + Value::ByVal(val) => if let PrimVal::Ptr(ptr) = val { + self.memory.mark_inner_allocation(ptr.alloc_id, mutable)?; + }, + Value::ByValPair(val1, val2) => { + if let PrimVal::Ptr(ptr) = val1 { + self.memory.mark_inner_allocation(ptr.alloc_id, mutable)?; + } + if let PrimVal::Ptr(ptr) = val2 { + self.memory.mark_inner_allocation(ptr.alloc_id, mutable)?; + } + }, + } + // see comment on `initialized` field + assert!(!global_value.initialized); + global_value.initialized = true; + assert_eq!(global_value.mutable, Mutability::Mutable); + global_value.mutable = mutable; + } else { + bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_lvalue); + }, + StackPopCleanup::Goto(target) => self.goto_block(target), + StackPopCleanup::None => {}, + StackPopCleanup::Tls(key) => { + // either fetch the next dtor or start new from the beginning, if any are left with a non-null data + let dtor = match self.memory.fetch_tls_dtor(key)? { + dtor @ Some(_) => dtor, + None => self.memory.fetch_tls_dtor(None)?, + }; + if let Some((instance, ptr, key)) = dtor { + trace!("Running TLS dtor {:?} on {:?}", instance, ptr); + // TODO: Potentially, this has to support all the other possible instances? See eval_fn_call in terminator/mod.rs + let mir = self.load_mir(instance.def)?; + self.push_stack_frame( + instance, + mir.span, + mir, + Lvalue::undef(), + StackPopCleanup::Tls(Some(key)), + )?; + let arg_local = self.frame().mir.args_iter().next().ok_or(EvalError::AbiViolation("TLS dtor does not take enough arguments.".to_owned()))?; + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + let ty = self.tcx.mk_mut_ptr(self.tcx.types.u8); + self.write_ptr(dest, ptr, ty)?; + } + } + } + // deallocate all locals that are backed by an allocation + for local in frame.locals { + self.deallocate_local(local)?; + } + + Ok(()) + } + + pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> { + if let Some(Value::ByRef(ptr, _aligned)) = local { + trace!("deallocating local"); + let ptr = ptr.to_ptr()?; + self.memory.dump_alloc(ptr.alloc_id); + match self.memory.get(ptr.alloc_id)?.kind { + // for a constant like `const FOO: &i32 = &1;` the local containing + // the `1` is referred to by the global.
We transitively marked everything + the global refers to as static itself, so we don't free it here + MemoryKind::Static => {} + MemoryKind::Stack => self.memory.deallocate(ptr, None, MemoryKind::Stack)?, + other => bug!("local contained non-stack memory: {:?}", other), + } + }; + Ok(()) + } + + pub fn assign_discr_and_fields< + V: IntoValTyPair<'tcx>, + J: IntoIterator<Item = V>, + >( + &mut self, + dest: Lvalue<'tcx>, + dest_ty: Ty<'tcx>, + discr_offset: u64, + operands: J, + discr_val: u128, + variant_idx: usize, + discr_size: u64, + ) -> EvalResult<'tcx> + where J::IntoIter: ExactSizeIterator, + { + // FIXME(solson) + let dest_ptr = self.force_allocation(dest)?.to_ptr()?; + + let discr_dest = dest_ptr.offset(discr_offset, self.memory.layout)?; + self.memory.write_uint(discr_dest, discr_val, discr_size)?; + + let dest = Lvalue::Ptr { + ptr: dest_ptr.into(), + extra: LvalueExtra::DowncastVariant(variant_idx), + aligned: true, + }; + + self.assign_fields(dest, dest_ty, operands) + } + + pub fn assign_fields< + V: IntoValTyPair<'tcx>, + J: IntoIterator<Item = V>, + >( + &mut self, + dest: Lvalue<'tcx>, + dest_ty: Ty<'tcx>, + operands: J, + ) -> EvalResult<'tcx> + where J::IntoIter: ExactSizeIterator, + { + if self.type_size(dest_ty)? == Some(0) { + // zst assigning is a nop + return Ok(()); + } + if self.ty_to_primval_kind(dest_ty).is_ok() { + let mut iter = operands.into_iter(); + assert_eq!(iter.len(), 1); + let (value, value_ty) = iter.next().unwrap().into_val_ty_pair(self)?; + return self.write_value(value, dest, value_ty); + } + for (field_index, operand) in operands.into_iter().enumerate() { + let (value, value_ty) = operand.into_val_ty_pair(self)?; + let field_dest = self.lvalue_field(dest, field_index, dest_ty, value_ty)?; + self.write_value(value, field_dest, value_ty)?; + } + Ok(()) + } + + /// Evaluate an assignment statement. + /// + /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue + /// type writes its results directly into the memory specified by the lvalue. + pub(super) fn eval_rvalue_into_lvalue( + &mut self, + rvalue: &mir::Rvalue<'tcx>, + lvalue: &mir::Lvalue<'tcx>, + ) -> EvalResult<'tcx> { + let dest = self.eval_lvalue(lvalue)?; + let dest_ty = self.lvalue_ty(lvalue); + let dest_layout = self.type_layout(dest_ty)?; + + use rustc::mir::Rvalue::*; + match *rvalue { + Use(ref operand) => { + let value = self.eval_operand(operand)?; + self.write_value(value, dest, dest_ty)?; + } + + BinaryOp(bin_op, ref left, ref right) => { + if self.intrinsic_overflowing(bin_op, left, right, dest, dest_ty)? { + // There was an overflow in an unchecked binop. Right now, we consider this an error and bail out. + // The rationale is that the reason rustc emits unchecked binops in release mode (vs. the checked binops + // it emits in debug mode) is performance, but it doesn't cost us any performance in miri. + // If, however, the compiler ever starts transforming unchecked intrinsics into unchecked binops, + // we have to go back to just ignoring the overflow here. + return Err(EvalError::OverflowingMath); + } + } + + CheckedBinaryOp(bin_op, ref left, ref right) => { + self.intrinsic_with_overflow(bin_op, left, right, dest, dest_ty)?; + } + + UnaryOp(un_op, ref operand) => { + let val = self.eval_operand_to_primval(operand)?; + let kind = self.ty_to_primval_kind(dest_ty)?; + self.write_primval(dest, operator::unary_op(un_op, val, kind)?, dest_ty)?; + } + + // Skip everything for zsts + Aggregate(..) if self.type_size(dest_ty)?
== Some(0) => {} + + Aggregate(ref kind, ref operands) => { + self.inc_step_counter_and_check_limit(operands.len() as u64)?; + use rustc::ty::layout::Layout::*; + match *dest_layout { + Univariant { .. } | Array { .. } => { + self.assign_fields(dest, dest_ty, operands)?; + } + + General { discr, ref variants, .. } => { + if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind { + let discr_val = adt_def.discriminants(self.tcx) + .nth(variant) + .expect("broken mir: Adt variant id invalid") + .to_u128_unchecked(); + let discr_size = discr.size().bytes(); + + self.assign_discr_and_fields( + dest, + dest_ty, + variants[variant].offsets[0].bytes(), + operands, + discr_val, + variant, + discr_size, + )?; + } else { + bug!("tried to assign {:?} to Layout::General", kind); + } + } + + RawNullablePointer { nndiscr, .. } => { + if let mir::AggregateKind::Adt(_, variant, _, _) = **kind { + if nndiscr == variant as u64 { + assert_eq!(operands.len(), 1); + let operand = &operands[0]; + let value = self.eval_operand(operand)?; + let value_ty = self.operand_ty(operand); + self.write_value(value, dest, value_ty)?; + } else { + if let Some(operand) = operands.get(0) { + assert_eq!(operands.len(), 1); + let operand_ty = self.operand_ty(operand); + assert_eq!(self.type_size(operand_ty)?, Some(0)); + } + self.write_null(dest, dest_ty)?; + } + } else { + bug!("tried to assign {:?} to Layout::RawNullablePointer", kind); + } + } + + StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { + if let mir::AggregateKind::Adt(_, variant, _, _) = **kind { + if nndiscr == variant as u64 { + self.assign_fields(dest, dest_ty, operands)?; + } else { + for operand in operands { + let operand_ty = self.operand_ty(operand); + assert_eq!(self.type_size(operand_ty)?, Some(0)); + } + let (offset, ty) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?; + + // FIXME(solson) + let dest = self.force_allocation(dest)?.to_ptr()?; + + let dest = dest.offset(offset.bytes(), self.memory.layout)?; + let dest_size = self.type_size(ty)? + .expect("bad StructWrappedNullablePointer discrfield"); + self.memory.write_int(dest, 0, dest_size)?; + } + } else { + bug!("tried to assign {:?} to Layout::RawNullablePointer", kind); + } + } + + CEnum { .. } => { + assert_eq!(operands.len(), 0); + if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind { + let n = adt_def.discriminants(self.tcx) + .nth(variant) + .expect("broken mir: Adt variant index invalid") + .to_u128_unchecked(); + self.write_primval(dest, PrimVal::Bytes(n), dest_ty)?; + } else { + bug!("tried to assign {:?} to Layout::CEnum", kind); + } + } + + Vector { count, .. } => { + debug_assert_eq!(count, operands.len() as u64); + self.assign_fields(dest, dest_ty, operands)?; + } + + UntaggedUnion { .. } => { + assert_eq!(operands.len(), 1); + let operand = &operands[0]; + let value = self.eval_operand(operand)?; + let value_ty = self.operand_ty(operand); + self.write_value(value, dest, value_ty)?; + } + + _ => { + return Err(EvalError::Unimplemented(format!( + "can't handle destination layout {:?} when assigning {:?}", + dest_layout, + kind + ))); + } + } + } + + Repeat(ref operand, _) => { + let (elem_ty, length) = match dest_ty.sty { + ty::TyArray(elem_ty, n) => (elem_ty, n as u64), + _ => bug!("tried to assign array-repeat to non-array type {:?}", dest_ty), + }; + self.inc_step_counter_and_check_limit(length)?; + let elem_size = self.type_size(elem_ty)? 
+ .expect("repeat element type must be sized"); + let value = self.eval_operand(operand)?; + + // FIXME(solson) + let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?); + + for i in 0..length { + let elem_dest = dest.offset(i * elem_size, self.memory.layout)?; + self.write_value_to_ptr(value, elem_dest, elem_ty)?; + } + } + + Len(ref lvalue) => { + if self.const_env() { + return Err(EvalError::NeedsRfc("computing the length of arrays".to_string())); + } + let src = self.eval_lvalue(lvalue)?; + let ty = self.lvalue_ty(lvalue); + let (_, len) = src.elem_ty_and_len(ty); + self.write_primval(dest, PrimVal::from_u128(len as u128), dest_ty)?; + } + + Ref(_, _, ref lvalue) => { + let src = self.eval_lvalue(lvalue)?; + // We ignore the alignment of the lvalue here -- this rvalue produces sth. of type &, which must always be aligned. + let (ptr, extra, _aligned) = self.force_allocation(src)?.to_ptr_extra_aligned(); + let ty = self.lvalue_ty(lvalue); + + let val = match extra { + LvalueExtra::None => ptr.to_value(), + LvalueExtra::Length(len) => ptr.to_value_with_len(len), + LvalueExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable), + LvalueExtra::DowncastVariant(..) => + bug!("attempted to take a reference to an enum downcast lvalue"), + }; + + // Check alignment and non-NULLness. + let (_, align) = self.size_and_align_of_dst(ty, val)?; + self.memory.check_align(ptr, align)?; + + self.write_value(val, dest, dest_ty)?; + } + + NullaryOp(mir::NullOp::Box, ty) => { + if self.const_env() { + return Err(EvalError::NeedsRfc("\"heap\" allocations".to_string())); + } + // FIXME: call the `exchange_malloc` lang item if available + let size = self.type_size(ty)?.expect("box only works with sized types"); + if size == 0 { + let align = self.type_align(ty)?; + self.write_primval(dest, PrimVal::Bytes(align.into()), dest_ty)?; + } else { + let align = self.type_align(ty)?; + let ptr = self.memory.allocate(size, align, MemoryKind::Rust)?; + self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?; + } + } + + NullaryOp(mir::NullOp::SizeOf, ty) => { + if self.const_env() { + return Err(EvalError::NeedsRfc("computing the size of types (size_of)".to_string())); + } + let size = self.type_size(ty)?.expect("SizeOf nullary MIR operator called for unsized type"); + self.write_primval(dest, PrimVal::from_u128(size as u128), dest_ty)?; + } + + Cast(kind, ref operand, cast_ty) => { + debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty); + use rustc::mir::CastKind::*; + match kind { + Unsize => { + let src = self.eval_operand(operand)?; + let src_ty = self.operand_ty(operand); + self.unsize_into(src, src_ty, dest, dest_ty)?; + } + + Misc => { + let src = self.eval_operand(operand)?; + let src_ty = self.operand_ty(operand); + if self.type_is_fat_ptr(src_ty) { + match (src, self.type_is_fat_ptr(dest_ty)) { + (Value::ByRef(..), _) | + (Value::ByValPair(..), true) => { + self.write_value(src, dest, dest_ty)?; + }, + (Value::ByValPair(data, _), false) => { + self.write_value(Value::ByVal(data), dest, dest_ty)?; + }, + (Value::ByVal(_), _) => bug!("expected fat ptr"), + } + } else { + let src_val = self.value_to_primval(src, src_ty)?; + let dest_val = self.cast_primval(src_val, src_ty, dest_ty)?; + self.write_value(Value::ByVal(dest_val), dest, dest_ty)?; + } + } + + ReifyFnPointer => match self.operand_ty(operand).sty { + ty::TyFnDef(def_id, substs) => { + let instance = resolve(self.tcx, def_id, substs); + let fn_ptr = self.memory.create_fn_alloc(instance); + 
self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?;
+                        },
+                        ref other => bug!("reify fn pointer on {:?}", other),
+                    },
+
+                    UnsafeFnPointer => match dest_ty.sty {
+                        ty::TyFnPtr(_) => {
+                            let src = self.eval_operand(operand)?;
+                            self.write_value(src, dest, dest_ty)?;
+                        },
+                        ref other => bug!("fn to unsafe fn cast on {:?}", other),
+                    },
+
+                    ClosureFnPointer => match self.operand_ty(operand).sty {
+                        ty::TyClosure(def_id, substs) => {
+                            let instance = resolve_closure(self.tcx, def_id, substs, ty::ClosureKind::FnOnce);
+                            let fn_ptr = self.memory.create_fn_alloc(instance);
+                            self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?;
+                        },
+                        ref other => bug!("closure fn pointer on {:?}", other),
+                    },
+                }
+            }
+
+            Discriminant(ref lvalue) => {
+                let lval = self.eval_lvalue(lvalue)?;
+                let ty = self.lvalue_ty(lvalue);
+                let ptr = self.force_allocation(lval)?.to_ptr()?;
+                let discr_val = self.read_discriminant_value(ptr, ty)?;
+                if let ty::TyAdt(adt_def, _) = ty.sty {
+                    if adt_def.discriminants(self.tcx).all(|v| discr_val != v.to_u128_unchecked()) {
+                        return Err(EvalError::InvalidDiscriminant);
+                    }
+                } else {
+                    bug!("rustc only generates Rvalue::Discriminant for enums");
+                }
+                self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
+            },
+        }
+
+        if log_enabled!(::log::LogLevel::Trace) {
+            self.dump_local(dest);
+        }
+
+        Ok(())
+    }
+
+    pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
+        match ty.sty {
+            ty::TyRawPtr(ref tam) |
+            ty::TyRef(_, ref tam) => !self.type_is_sized(tam.ty),
+            ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
+            _ => false,
+        }
+    }
+
+    pub(super) fn nonnull_offset_and_ty(
+        &self,
+        ty: Ty<'tcx>,
+        nndiscr: u64,
+        discrfield: &[u32],
+    ) -> EvalResult<'tcx, (Size, Ty<'tcx>)> {
+        // Skip the constant 0 at the start meant for LLVM GEP and the outer non-null variant
+        let path = discrfield.iter().skip(2).map(|&i| i as usize);
+
+        // Handle the field index for the outer non-null variant.
+        let (inner_offset, inner_ty) = match ty.sty {
+            ty::TyAdt(adt_def, substs) => {
+                let variant = &adt_def.variants[nndiscr as usize];
+                let index = discrfield[1];
+                let field = &variant.fields[index as usize];
+                (self.get_field_offset(ty, index as usize)?, field.ty(self.tcx, substs))
+            }
+            _ => bug!("non-enum for StructWrappedNullablePointer: {}", ty),
+        };
+
+        self.field_path_offset_and_ty(inner_offset, inner_ty, path)
+    }
+
+    fn field_path_offset_and_ty<I: Iterator<Item = usize>>(
+        &self,
+        mut offset: Size,
+        mut ty: Ty<'tcx>,
+        path: I,
+    ) -> EvalResult<'tcx, (Size, Ty<'tcx>)> {
+        // Skip the initial 0 intended for LLVM GEP.
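// ---------------------------------------------------------------------------
// Illustrative sketch (editorial aside, not part of the patch): the two
// nullable-pointer layouts served by `nonnull_offset_and_ty` come from
// rustc's enum niche optimization -- the discriminant is encoded in a
// pointer that can never be null, and `discrfield` is the field path leading
// to it. Only the `Option<&T>` equality is a documented guarantee; the
// struct-wrapped case reflects observed rustc behaviour on 64-bit targets:
//
//     use std::mem::size_of;
//
//     struct Wrapped<'a> { tag: u32, ptr: &'a u8 } // `ptr`: the non-null field
//
//     fn main() {
//         // RawNullablePointer: the pointer itself is the discriminant.
//         assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
//         // StructWrappedNullablePointer: the non-null field is nested.
//         assert_eq!(size_of::<Option<Wrapped>>(), size_of::<Wrapped>());
//     }
// ---------------------------------------------------------------------------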
+ for field_index in path { + let field_offset = self.get_field_offset(ty, field_index)?; + trace!("field_path_offset_and_ty: {}, {}, {:?}, {:?}", field_index, ty, field_offset, offset); + ty = self.get_field_ty(ty, field_index)?; + offset = offset.checked_add(field_offset, &self.tcx.data_layout).unwrap(); + } + + Ok((offset, ty)) + } + fn get_fat_field(&self, pointee_ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Ty<'tcx>> { + match (field_index, &self.tcx.struct_tail(pointee_ty).sty) { + (1, &ty::TyStr) | + (1, &ty::TySlice(_)) => Ok(self.tcx.types.usize), + (1, &ty::TyDynamic(..)) | + (0, _) => Ok(self.tcx.mk_imm_ptr(self.tcx.types.u8)), + _ => bug!("invalid fat pointee type: {}", pointee_ty), + } + } + + pub fn get_field_ty(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Ty<'tcx>> { + match ty.sty { + ty::TyAdt(adt_def, _) if adt_def.is_box() => self.get_fat_field(ty.boxed_ty(), field_index), + ty::TyAdt(adt_def, substs) if adt_def.is_enum() => { + use rustc::ty::layout::Layout::*; + match *self.type_layout(ty)? { + RawNullablePointer { nndiscr, .. } | + StructWrappedNullablePointer { nndiscr, .. } => Ok(adt_def.variants[nndiscr as usize].fields[field_index].ty(self.tcx, substs)), + _ => Err(EvalError::Unimplemented(format!("get_field_ty can't handle enum type: {:?}, {:?}", ty, ty.sty))), + } + } + ty::TyAdt(adt_def, substs) => { + Ok(adt_def.struct_variant().fields[field_index].ty(self.tcx, substs)) + } + + ty::TyTuple(fields, _) => Ok(fields[field_index]), + + ty::TyRef(_, ref tam) | + ty::TyRawPtr(ref tam) => self.get_fat_field(tam.ty, field_index), + + ty::TyArray(ref inner, _) => Ok(inner), + + _ => Err(EvalError::Unimplemented(format!("can't handle type: {:?}, {:?}", ty, ty.sty))), + } + } + + fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> { + let layout = self.type_layout(ty)?; + + use rustc::ty::layout::Layout::*; + match *layout { + Univariant { ref variant, .. } => { + Ok(variant.offsets[field_index]) + } + FatPointer { .. } => { + let bytes = field_index as u64 * self.memory.pointer_size(); + Ok(Size::from_bytes(bytes)) + } + StructWrappedNullablePointer { ref nonnull, .. } => { + Ok(nonnull.offsets[field_index]) + } + _ => { + let msg = format!("can't handle type: {:?}, with layout: {:?}", ty, layout); + Err(EvalError::Unimplemented(msg)) + } + } + } + + pub fn get_field_count(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> { + let layout = self.type_layout(ty)?; + + use rustc::ty::layout::Layout::*; + match *layout { + Univariant { ref variant, .. } => Ok(variant.offsets.len() as u64), + FatPointer { .. } => Ok(2), + StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets.len() as u64), + Vector { count , .. } | + Array { count, .. } => Ok(count), + Scalar { .. 
} => Ok(0),
+            _ => {
+                let msg = format!("can't handle type: {:?}, with layout: {:?}", ty, layout);
+                Err(EvalError::Unimplemented(msg))
+            }
+        }
+    }
+
+    pub(super) fn wrapping_pointer_offset(&self, ptr: Pointer, pointee_ty: Ty<'tcx>, offset: i64) -> EvalResult<'tcx, Pointer> {
+        // FIXME: assuming here that type size is < i64::max_value()
+        let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64;
+        let offset = offset.overflowing_mul(pointee_size).0;
+        ptr.wrapping_signed_offset(offset, self.memory.layout)
+    }
+
+    pub(super) fn pointer_offset(&self, ptr: Pointer, pointee_ty: Ty<'tcx>, offset: i64) -> EvalResult<'tcx, Pointer> {
+        // This function raises an error if the offset moves the pointer outside of its allocation. We consider
+        // ZSTs their own huge allocation that doesn't overlap with anything (and nothing moves in there because the size is 0).
+        // We also consider the NULL pointer its own separate allocation, and all the remaining integer pointers their own
+        // allocation.
+
+        if ptr.is_null()? { // NULL pointers must only be offset by 0
+            return if offset == 0 { Ok(ptr) } else { Err(EvalError::InvalidNullPointerUsage) };
+        }
+        // FIXME: assuming here that type size is < i64::max_value()
+        let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64;
+        return if let Some(offset) = offset.checked_mul(pointee_size) {
+            let ptr = ptr.signed_offset(offset, self.memory.layout)?;
+            // Do not do bounds-checking for integers; they can never alias a normal pointer anyway.
+            if let PrimVal::Ptr(ptr) = ptr.into_inner_primval() {
+                self.memory.check_bounds(ptr, false)?;
+            } else if ptr.is_null()? {
+                // We moved *to* a NULL pointer. That seems wrong, LLVM considers the NULL pointer its own small allocation. Reject this, for now.
+                return Err(EvalError::InvalidNullPointerUsage);
+            }
+            Ok(ptr)
+        } else {
+            Err(EvalError::OverflowingMath)
+        }
+    }
+
+    pub(super) fn eval_operand_to_primval(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, PrimVal> {
+        let value = self.eval_operand(op)?;
+        let ty = self.operand_ty(op);
+        self.value_to_primval(value, ty)
+    }
+
+    pub(super) fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, Value> {
+        use rustc::mir::Operand::*;
+        match *op {
+            Consume(ref lvalue) => self.eval_and_read_lvalue(lvalue),
+
+            Constant(ref constant) => {
+                use rustc::mir::Literal;
+                let mir::Constant { ref literal, ..
} = **constant; + let value = match *literal { + Literal::Value { ref value } => self.const_to_value(value)?, + + Literal::Item { def_id, substs } => { + let instance = self.resolve_associated_const(def_id, substs); + let cid = GlobalId { instance, promoted: None }; + self.globals.get(&cid).expect("static/const not cached").value + } + + Literal::Promoted { index } => { + let cid = GlobalId { + instance: self.frame().instance, + promoted: Some(index), + }; + self.globals.get(&cid).expect("promoted not cached").value + } + }; + + Ok(value) + } + } + } + + pub(super) fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> { + self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs()) + } + + fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> { + let size = self.type_size(ty)?.expect("cannot copy from an unsized type"); + let align = self.type_align(ty)?; + self.memory.copy(src, dest, size, align, false)?; + Ok(()) + } + + pub(super) fn force_allocation( + &mut self, + lvalue: Lvalue<'tcx>, + ) -> EvalResult<'tcx, Lvalue<'tcx>> { + let new_lvalue = match lvalue { + Lvalue::Local { frame, local } => { + // -1 since we don't store the return value + match self.stack[frame].locals[local.index() - 1] { + None => return Err(EvalError::DeadLocal), + Some(Value::ByRef(ptr, aligned)) => { + Lvalue::Ptr { ptr, aligned, extra: LvalueExtra::None } + }, + Some(val) => { + let ty = self.stack[frame].mir.local_decls[local].ty; + let ty = self.monomorphize(ty, self.stack[frame].instance.substs); + let substs = self.stack[frame].instance.substs; + let ptr = self.alloc_ptr_with_substs(ty, substs)?; + self.stack[frame].locals[local.index() - 1] = Some(Value::by_ref(ptr.into())); // it stays live + self.write_value_to_ptr(val, ptr.into(), ty)?; + Lvalue::from_ptr(ptr) + } + } + } + Lvalue::Ptr { .. } => lvalue, + Lvalue::Global(cid) => { + let global_val = self.globals.get(&cid).expect("global not cached").clone(); + match global_val.value { + Value::ByRef(ptr, aligned) => + Lvalue::Ptr { ptr, aligned, extra: LvalueExtra::None }, + _ => { + let ptr = self.alloc_ptr_with_substs(global_val.ty, cid.instance.substs)?; + self.memory.mark_static(ptr.alloc_id); + self.write_value_to_ptr(global_val.value, ptr.into(), global_val.ty)?; + // see comment on `initialized` field + if global_val.initialized { + self.memory.mark_static_initalized(ptr.alloc_id, global_val.mutable)?; + } + let lval = self.globals.get_mut(&cid).expect("already checked"); + *lval = Global { + value: Value::by_ref(ptr.into()), + .. global_val + }; + Lvalue::from_ptr(ptr) + }, + } + } + }; + Ok(new_lvalue) + } + + /// ensures this Value is not a ByRef + pub(super) fn follow_by_ref_value(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { + match value { + Value::ByRef(ptr, aligned) => { + self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty)) + } + other => Ok(other), + } + } + + pub(super) fn value_to_primval(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + match self.follow_by_ref_value(value, ty)? { + Value::ByRef(..) => bug!("follow_by_ref_value can't result in `ByRef`"), + + Value::ByVal(primval) => { + self.ensure_valid_value(primval, ty)?; + Ok(primval) + } + + Value::ByValPair(..) 
=> bug!("value_to_primval can't work with fat pointers"),
+        }
+    }
+
+    pub(super) fn write_null(
+        &mut self,
+        dest: Lvalue<'tcx>,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        self.write_primval(dest, PrimVal::Bytes(0), dest_ty)
+    }
+
+    pub(super) fn write_ptr(
+        &mut self,
+        dest: Lvalue<'tcx>,
+        val: Pointer,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        self.write_value(val.to_value(), dest, dest_ty)
+    }
+
+    pub(super) fn write_primval(
+        &mut self,
+        dest: Lvalue<'tcx>,
+        val: PrimVal,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        self.write_value(Value::ByVal(val), dest, dest_ty)
+    }
+
+    pub(super) fn write_value(
+        &mut self,
+        src_val: Value,
+        dest: Lvalue<'tcx>,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        // Note that it is really important that the type here is the right one, and matches the type things are read at.
+        // In case `src_val` is a `ByValPair`, we don't do any magic here to handle padding properly, which is only
+        // correct if we never look at this data with the wrong type.
+
+        match dest {
+            Lvalue::Global(cid) => {
+                let dest = self.globals.get_mut(&cid).expect("global should be cached").clone();
+                if dest.mutable == Mutability::Immutable {
+                    return Err(EvalError::ModifiedConstantMemory);
+                }
+                let write_dest = |this: &mut Self, val| {
+                    *this.globals.get_mut(&cid).expect("already checked") = Global {
+                        value: val,
+                        ..dest
+                    };
+                    Ok(())
+                };
+                self.write_value_possibly_by_val(src_val, write_dest, dest.value, dest_ty)
+            },
+
+            Lvalue::Ptr { ptr, extra, aligned } => {
+                assert_eq!(extra, LvalueExtra::None);
+                self.write_maybe_aligned(aligned,
+                    |ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty))
+            }
+
+            Lvalue::Local { frame, local } => {
+                let dest = self.stack[frame].get_local(local)?;
+                self.write_value_possibly_by_val(
+                    src_val,
+                    |this, val| this.stack[frame].set_local(local, val),
+                    dest,
+                    dest_ty,
+                )
+            }
+        }
+    }
+
+    // The cases here can be a bit subtle. Read carefully!
+    fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
+        &mut self,
+        src_val: Value,
+        write_dest: F,
+        old_dest_val: Value,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        if let Value::ByRef(dest_ptr, aligned) = old_dest_val {
+            // If the value is already `ByRef` (that is, backed by an `Allocation`),
+            // then we must write the new value into this allocation, because there may be
+            // other pointers into the allocation. These other pointers are logically
+            // pointers into the local variable, and must be able to observe the change.
+            //
+            // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
+            // knew for certain that there were no outstanding pointers to this allocation.
+            self.write_maybe_aligned(aligned,
+                |ectx| ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty))?;
+
+        } else if let Value::ByRef(src_ptr, aligned) = src_val {
+            // If the value is not `ByRef`, then we know there are no pointers to it
+            // and we can simply overwrite the `Value` in the locals array directly.
+            //
+            // In this specific case, where the source value is `ByRef`, we must duplicate
+            // the allocation, because this is a by-value operation. It would be incorrect
+            // if they referred to the same allocation, since then a change to one would
+            // implicitly change the other.
+            //
+            // It is a valid optimization to attempt reading a primitive value out of the
+            // source and write that into the destination without making an allocation, so
+            // we do so here.
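// ---------------------------------------------------------------------------
// Illustrative sketch (editorial aside, not part of the patch): the aliasing
// rule described in the comments above, in ordinary Rust. Because the local
// is memory-backed, a write through an outstanding pointer must be
// observable; an interpreter that silently replaced the allocation with an
// immediate value would lose that update:
//
//     fn main() {
//         let mut local = 1u8;               // think: a ByRef local
//         let alias = &mut local as *mut u8; // an outstanding pointer into it
//         unsafe { *alias = 2 };             // write through the "allocation"
//         assert_eq!(local, 2);              // the local must see the change
//     }
// ---------------------------------------------------------------------------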
+            self.read_maybe_aligned(aligned, |ectx| {
+                if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) {
+                    write_dest(ectx, src_val)?;
+                } else {
+                    let dest_ptr = ectx.alloc_ptr(dest_ty)?.into();
+                    ectx.copy(src_ptr, dest_ptr, dest_ty)?;
+                    write_dest(ectx, Value::by_ref(dest_ptr))?;
+                }
+                Ok(())
+            })?;
+
+        } else {
+            // Finally, we have the simple case where neither source nor destination is
+            // `ByRef`. We may simply copy the source value over the destination.
+            write_dest(self, src_val)?;
+        }
+        Ok(())
+    }
+
+    pub(super) fn write_value_to_ptr(
+        &mut self,
+        value: Value,
+        dest: Pointer,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        match value {
+            Value::ByRef(ptr, aligned) => {
+                self.read_maybe_aligned(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
+            },
+            Value::ByVal(primval) => {
+                let size = self.type_size(dest_ty)?.expect("dest type must be sized");
+                self.memory.write_primval(dest, primval, size)
+            }
+            Value::ByValPair(a, b) => self.write_pair_to_ptr(a, b, dest.to_ptr()?, dest_ty),
+        }
+    }
+
+    pub(super) fn write_pair_to_ptr(
+        &mut self,
+        a: PrimVal,
+        b: PrimVal,
+        ptr: MemoryPointer,
+        mut ty: Ty<'tcx>
+    ) -> EvalResult<'tcx> {
+        while self.get_field_count(ty)? == 1 {
+            ty = self.get_field_ty(ty, 0)?;
+        }
+        assert_eq!(self.get_field_count(ty)?, 2);
+        let field_0 = self.get_field_offset(ty, 0)?.bytes();
+        let field_1 = self.get_field_offset(ty, 1)?.bytes();
+        let field_0_ty = self.get_field_ty(ty, 0)?;
+        let field_1_ty = self.get_field_ty(ty, 1)?;
+        let field_0_size = self.type_size(field_0_ty)?.expect("pair element type must be sized");
+        let field_1_size = self.type_size(field_1_ty)?.expect("pair element type must be sized");
+        self.memory.write_primval(ptr.offset(field_0, self.memory.layout)?.into(), a, field_0_size)?;
+        self.memory.write_primval(ptr.offset(field_1, self.memory.layout)?.into(), b, field_1_size)?;
+        Ok(())
+    }
+
+    pub fn ty_to_primval_kind(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimValKind> {
+        use syntax::ast::FloatTy;
+
+        let kind = match ty.sty {
+            ty::TyBool => PrimValKind::Bool,
+            ty::TyChar => PrimValKind::Char,
+
+            ty::TyInt(int_ty) => {
+                use syntax::ast::IntTy::*;
+                let size = match int_ty {
+                    I8 => 1,
+                    I16 => 2,
+                    I32 => 4,
+                    I64 => 8,
+                    I128 => 16,
+                    Is => self.memory.pointer_size(),
+                };
+                PrimValKind::from_int_size(size)
+            }
+
+            ty::TyUint(uint_ty) => {
+                use syntax::ast::UintTy::*;
+                let size = match uint_ty {
+                    U8 => 1,
+                    U16 => 2,
+                    U32 => 4,
+                    U64 => 8,
+                    U128 => 16,
+                    Us => self.memory.pointer_size(),
+                };
+                PrimValKind::from_uint_size(size)
+            }
+
+            ty::TyFloat(FloatTy::F32) => PrimValKind::F32,
+            ty::TyFloat(FloatTy::F64) => PrimValKind::F64,
+
+            ty::TyFnPtr(_) => PrimValKind::FnPtr,
+
+            ty::TyRef(_, ref tam) |
+            ty::TyRawPtr(ref tam) if self.type_is_sized(tam.ty) => PrimValKind::Ptr,
+
+            ty::TyAdt(def, _) if def.is_box() => PrimValKind::Ptr,
+
+            ty::TyAdt(def, substs) => {
+                use rustc::ty::layout::Layout::*;
+                match *self.type_layout(ty)? {
+                    CEnum { discr, signed, .. } => {
+                        let size = discr.size().bytes();
+                        if signed {
+                            PrimValKind::from_int_size(size)
+                        } else {
+                            PrimValKind::from_uint_size(size)
+                        }
+                    }
+
+                    RawNullablePointer { value, .. } => {
+                        use rustc::ty::layout::Primitive::*;
+                        match value {
+                            // TODO(solson): Does signedness matter here? What should the sign be?
+                            Int(int) => PrimValKind::from_uint_size(int.size().bytes()),
+                            F32 => PrimValKind::F32,
+                            F64 => PrimValKind::F64,
+                            Pointer => PrimValKind::Ptr,
+                        }
+                    }
+
+                    // represent single field structs as their single field
+                    Univariant { ..
} => {
+                        // enums with just one variant are no different, but `.struct_variant()` doesn't work for enums
+                        let variant = &def.variants[0];
+                        // FIXME: also allow structs with only a single non zst field
+                        if variant.fields.len() == 1 {
+                            return self.ty_to_primval_kind(variant.fields[0].ty(self.tcx, substs));
+                        } else {
+                            return Err(EvalError::TypeNotPrimitive(ty));
+                        }
+                    }
+
+                    _ => return Err(EvalError::TypeNotPrimitive(ty)),
+                }
+            }
+
+            _ => return Err(EvalError::TypeNotPrimitive(ty)),
+        };
+
+        Ok(kind)
+    }
+
+    fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> {
+        match ty.sty {
+            ty::TyBool if val.to_bytes()? > 1 => Err(EvalError::InvalidBool),
+
+            ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none()
+                => Err(EvalError::InvalidChar(val.to_bytes()? as u32 as u128)),
+
+            _ => Ok(()),
+        }
+    }
+
+    pub(super) fn read_value(&mut self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
+        if let Some(val) = self.try_read_value(ptr, ty)? {
+            Ok(val)
+        } else {
+            bug!("primitive read failed for type: {:?}", ty);
+        }
+    }
+
+    pub(crate) fn read_ptr(&self, ptr: MemoryPointer, pointee_ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
+        let p = self.memory.read_ptr(ptr)?;
+        if self.type_is_sized(pointee_ty) {
+            Ok(p.to_value())
+        } else {
+            trace!("reading fat pointer extra of type {}", pointee_ty);
+            let extra = ptr.offset(self.memory.pointer_size(), self.memory.layout)?;
+            match self.tcx.struct_tail(pointee_ty).sty {
+                ty::TyDynamic(..) => Ok(p.to_value_with_vtable(self.memory.read_ptr(extra)?.to_ptr()?)),
+                ty::TySlice(..) |
+                ty::TyStr => Ok(p.to_value_with_len(self.memory.read_usize(extra)?)),
+                _ => bug!("unsized primval ptr read from {:?}", pointee_ty),
+            }
+        }
+    }
+
+    fn try_read_value(&mut self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
+        use syntax::ast::FloatTy;
+
+        let val = match ty.sty {
+            ty::TyBool => PrimVal::from_bool(self.memory.read_bool(ptr.to_ptr()?)?),
+            ty::TyChar => {
+                let c = self.memory.read_uint(ptr.to_ptr()?, 4)? as u32;
+                match ::std::char::from_u32(c) {
+                    Some(ch) => PrimVal::from_char(ch),
+                    None => return Err(EvalError::InvalidChar(c as u128)),
+                }
+            }
+
+            ty::TyInt(int_ty) => {
+                use syntax::ast::IntTy::*;
+                let size = match int_ty {
+                    I8 => 1,
+                    I16 => 2,
+                    I32 => 4,
+                    I64 => 8,
+                    I128 => 16,
+                    Is => self.memory.pointer_size(),
+                };
+                // if we transmute a ptr to an isize, reading it back into a primval shouldn't panic
+                // Due to read_ptr ignoring the sign, we need to jump through some hoops
+                match self.memory.read_int(ptr.to_ptr()?, size) {
+                    Err(EvalError::ReadPointerAsBytes) if size == self.memory.pointer_size() => self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval(),
+                    other => PrimVal::from_i128(other?),
+                }
+            }
+
+            ty::TyUint(uint_ty) => {
+                use syntax::ast::UintTy::*;
+                let size = match uint_ty {
+                    U8 => 1,
+                    U16 => 2,
+                    U32 => 4,
+                    U64 => 8,
+                    U128 => 16,
+                    Us => self.memory.pointer_size(),
+                };
+                if size == self.memory.pointer_size() {
+                    // if we transmute a ptr to a usize, reading it back into a primval shouldn't panic
+                    self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval()
+                } else {
+                    PrimVal::from_u128(self.memory.read_uint(ptr.to_ptr()?, size)?)
+                }
+            }
+
+            ty::TyFloat(FloatTy::F32) => PrimVal::from_f32(self.memory.read_f32(ptr.to_ptr()?)?),
+            ty::TyFloat(FloatTy::F64) => PrimVal::from_f64(self.memory.read_f64(ptr.to_ptr()?)?),
+
+            ty::TyFnPtr(_) => self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval(),
+            ty::TyRef(_, ref tam) |
+            ty::TyRawPtr(ref tam) => return self.read_ptr(ptr.to_ptr()?, tam.ty).map(Some),
+
+            ty::TyAdt(def, _) => {
+                if def.is_box() {
+                    return self.read_ptr(ptr.to_ptr()?, ty.boxed_ty()).map(Some);
+                }
+                use rustc::ty::layout::Layout::*;
+                if let CEnum { discr, signed, .. } = *self.type_layout(ty)? {
+                    let size = discr.size().bytes();
+                    if signed {
+                        PrimVal::from_i128(self.memory.read_int(ptr.to_ptr()?, size)?)
+                    } else {
+                        PrimVal::from_u128(self.memory.read_uint(ptr.to_ptr()?, size)?)
+                    }
+                } else {
+                    return Ok(None);
+                }
+            },
+
+            _ => return Ok(None),
+        };
+
+        Ok(Some(Value::ByVal(val)))
+    }
+
+    pub(super) fn frame(&self) -> &Frame<'tcx> {
+        self.stack.last().expect("no call frames exist")
+    }
+
+    pub(super) fn frame_mut(&mut self) -> &mut Frame<'tcx> {
+        self.stack.last_mut().expect("no call frames exist")
+    }
+
+    pub(super) fn mir(&self) -> &'tcx mir::Mir<'tcx> {
+        self.frame().mir
+    }
+
+    pub(super) fn substs(&self) -> &'tcx Substs<'tcx> {
+        self.frame().instance.substs
+    }
+
+    fn unsize_into_ptr(
+        &mut self,
+        src: Value,
+        src_ty: Ty<'tcx>,
+        dest: Lvalue<'tcx>,
+        dest_ty: Ty<'tcx>,
+        sty: Ty<'tcx>,
+        dty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        // A<Box<T>> -> A<Box<Trait>> conversion
+        let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
+
+        match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
+            (&ty::TyArray(_, length), &ty::TySlice(_)) => {
+                let ptr = src.into_ptr(&mut self.memory)?;
+                // u64 cast is from usize to u64, which is always good
+                self.write_value(ptr.to_value_with_len(length as u64), dest, dest_ty)
+            }
+            (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
+                // For now, upcasts are limited to changes in marker
+                // traits, and hence never require an actual change to
+                // the vtable.
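// ---------------------------------------------------------------------------
// Illustrative sketch (editorial aside, not part of the patch): the unsizing
// cases this function interprets, written as surface-level coercions:
//
//     use std::fmt::Debug;
//
//     fn main() {
//         let arr = [1i32, 2, 3];
//         let slice: &[i32] = &arr;  // TyArray -> TySlice: attach the length
//         assert_eq!(slice.len(), 3);
//
//         let x = 5i32;
//         let obj: &dyn Debug = &x;  // sized -> TyDynamic: attach the vtable
//         assert_eq!(format!("{:?}", obj), "5");
//     }
// ---------------------------------------------------------------------------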
+                self.write_value(src, dest, dest_ty)
+            },
+            (_, &ty::TyDynamic(ref data, _)) => {
+                let trait_ref = data.principal().unwrap().with_self_ty(self.tcx, src_pointee_ty);
+                let trait_ref = self.tcx.erase_regions(&trait_ref);
+                let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
+                let ptr = src.into_ptr(&mut self.memory)?;
+                self.write_value(ptr.to_value_with_vtable(vtable), dest, dest_ty)
+            },
+
+            _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
+        }
+    }
+
+    fn unsize_into(
+        &mut self,
+        src: Value,
+        src_ty: Ty<'tcx>,
+        dest: Lvalue<'tcx>,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        match (&src_ty.sty, &dest_ty.sty) {
+            (&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) |
+            (&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) |
+            (&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => self.unsize_into_ptr(src, src_ty, dest, dest_ty, s.ty, d.ty),
+            (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
+                if def_a.is_box() || def_b.is_box() {
+                    if !def_a.is_box() || !def_b.is_box() {
+                        panic!("invalid unsizing between {:?} -> {:?}", src_ty, dest_ty);
+                    }
+                    return self.unsize_into_ptr(src, src_ty, dest, dest_ty, src_ty.boxed_ty(), dest_ty.boxed_ty());
+                }
+                if self.ty_to_primval_kind(src_ty).is_ok() {
+                    let sty = self.get_field_ty(src_ty, 0)?;
+                    let dty = self.get_field_ty(dest_ty, 0)?;
+                    return self.unsize_into(src, sty, dest, dty);
+                }
+                // unsizing of generic struct with pointer fields
+                // Example: `Arc<T>` -> `Arc<Trait>`
+                // here we need to increase the size of every &T thin ptr field to a fat ptr
+
+                assert_eq!(def_a, def_b);
+
+                let src_fields = def_a.variants[0].fields.iter();
+                let dst_fields = def_b.variants[0].fields.iter();
+
+                //let src = adt::MaybeSizedValue::sized(src);
+                //let dst = adt::MaybeSizedValue::sized(dst);
+                let src_ptr = match src {
+                    Value::ByRef(ptr, true) => ptr,
+                    // TODO: Is it possible for unaligned pointers to occur here?
+                    _ => bug!("expected aligned pointer, got {:?}", src),
+                };
+
+                // FIXME(solson)
+                let dest = self.force_allocation(dest)?.to_ptr()?;
+                let iter = src_fields.zip(dst_fields).enumerate();
+                for (i, (src_f, dst_f)) in iter {
+                    let src_fty = monomorphize_field_ty(self.tcx, src_f, substs_a);
+                    let dst_fty = monomorphize_field_ty(self.tcx, dst_f, substs_b);
+                    if self.type_size(dst_fty)?
== Some(0) {
+                        continue;
+                    }
+                    let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
+                    let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
+                    let src_f_ptr = src_ptr.offset(src_field_offset, self.memory.layout)?;
+                    let dst_f_ptr = dest.offset(dst_field_offset, self.memory.layout)?;
+                    if src_fty == dst_fty {
+                        self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?;
+                    } else {
+                        self.unsize_into(Value::by_ref(src_f_ptr), src_fty, Lvalue::from_ptr(dst_f_ptr), dst_fty)?;
+                    }
+                }
+                Ok(())
+            }
+            _ => bug!("unsize_into: invalid conversion: {:?} -> {:?}", src_ty, dest_ty),
+        }
+    }
+
+    pub(super) fn dump_local(&self, lvalue: Lvalue<'tcx>) {
+        // Debug output
+        if let Lvalue::Local { frame, local } = lvalue {
+            let mut allocs = Vec::new();
+            let mut msg = format!("{:?}", local);
+            let last_frame = self.stack.len() - 1;
+            if frame != last_frame {
+                write!(msg, " ({} frames up)", last_frame - frame).unwrap();
+            }
+            write!(msg, ":").unwrap();
+
+            match self.stack[frame].get_local(local) {
+                Err(EvalError::DeadLocal) => {
+                    write!(msg, " is dead").unwrap();
+                }
+                Err(err) => {
+                    panic!("Failed to access local: {:?}", err);
+                }
+                Ok(Value::ByRef(ptr, aligned)) => match ptr.into_inner_primval() {
+                    PrimVal::Ptr(ptr) => {
+                        write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " }).unwrap();
+                        allocs.push(ptr.alloc_id);
+                    },
+                    ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
+                },
+                Ok(Value::ByVal(val)) => {
+                    write!(msg, " {:?}", val).unwrap();
+                    if let PrimVal::Ptr(ptr) = val { allocs.push(ptr.alloc_id); }
+                }
+                Ok(Value::ByValPair(val1, val2)) => {
+                    write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
+                    if let PrimVal::Ptr(ptr) = val1 { allocs.push(ptr.alloc_id); }
+                    if let PrimVal::Ptr(ptr) = val2 { allocs.push(ptr.alloc_id); }
+                }
+            }
+
+            trace!("{}", msg);
+            self.memory.dump_allocs(allocs);
+        }
+    }
+
+    /// Convenience function to ensure correct usage of globals and code-sharing with locals.
+    pub fn modify_global<F>(&mut self, cid: GlobalId<'tcx>, f: F) -> EvalResult<'tcx>
+        where F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
+    {
+        let mut val = self.globals.get(&cid).expect("global not cached").clone();
+        if val.mutable == Mutability::Immutable {
+            return Err(EvalError::ModifiedConstantMemory);
+        }
+        val.value = f(self, val.value)?;
+        *self.globals.get_mut(&cid).expect("already checked") = val;
+        Ok(())
+    }
+
+    /// Convenience function to ensure correct usage of locals and code-sharing with globals.
+    pub fn modify_local<F>(
+        &mut self,
+        frame: usize,
+        local: mir::Local,
+        f: F,
+    ) -> EvalResult<'tcx>
+        where F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
+    {
+        let val = self.stack[frame].get_local(local)?;
+        let new_val = f(self, val)?;
+        self.stack[frame].set_local(local, new_val)?;
+        // FIXME(solson): Run this when setting to Undef? (See previous version of this code.)
+        // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) {
+        //     self.memory.deallocate(ptr)?;
+        // }
+        Ok(())
+    }
+}
+
+impl<'tcx> Frame<'tcx> {
+    pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> {
+        // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
+        self.locals[local.index() - 1].ok_or(EvalError::DeadLocal)
+    }
+
+    fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
+        // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
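// ---------------------------------------------------------------------------
// Illustrative sketch (editorial aside, not part of the patch): the slot
// convention used by `get_local`/`set_local`. MIR local 0 is the return
// pointer and has no slot, so local `i` lives at index `i - 1`, and `None`
// marks a local that is currently StorageDead. All names below are made up:
//
//     fn main() {
//         let locals: Vec<Option<u8>> = vec![Some(7), None]; // _1 and _2
//         let get = |i: usize| locals[i - 1].ok_or("dead local");
//         assert_eq!(get(1), Ok(7)); // _1 is live
//         assert!(get(2).is_err());  // _2 is dead
//     }
// ---------------------------------------------------------------------------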
+        match self.locals[local.index() - 1] {
+            None => Err(EvalError::DeadLocal),
+            Some(ref mut local) => {
+                *local = value;
+                Ok(())
+            }
+        }
+    }
+
+    pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
+        trace!("{:?} is now live", local);
+
+        let old = self.locals[local.index() - 1];
+        self.locals[local.index() - 1] = Some(Value::ByVal(PrimVal::Undef)); // StorageLive *always* kills the value that's currently stored
+        return Ok(old);
+    }
+
+    /// Returns the old value of the local
+    pub fn storage_dead(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
+        trace!("{:?} is now dead", local);
+
+        let old = self.locals[local.index() - 1];
+        self.locals[local.index() - 1] = None;
+        return Ok(old);
+    }
+}
+
+pub fn eval_main<'a, 'tcx: 'a>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    main_id: DefId,
+    start_wrapper: Option<DefId>,
+    limits: ResourceLimits,
+) {
+    fn run_main<'a, 'tcx: 'a>(
+        ecx: &mut EvalContext<'a, 'tcx>,
+        main_id: DefId,
+        start_wrapper: Option<DefId>,
+    ) -> EvalResult<'tcx> {
+        let main_instance = ty::Instance::mono(ecx.tcx, main_id);
+        let main_mir = ecx.load_mir(main_instance.def)?;
+        let mut cleanup_ptr = None; // Pointer to be deallocated when we are done
+
+        if !main_mir.return_ty.is_nil() || main_mir.arg_count != 0 {
+            return Err(EvalError::Unimplemented("miri does not support main functions without `fn()` type signatures".to_owned()));
+        }
+
+        if let Some(start_id) = start_wrapper {
+            let start_instance = ty::Instance::mono(ecx.tcx, start_id);
+            let start_mir = ecx.load_mir(start_instance.def)?;
+
+            if start_mir.arg_count != 3 {
+                return Err(EvalError::AbiViolation(format!("'start' lang item should have three arguments, but has {}", start_mir.arg_count)));
+            }
+
+            // Return value
+            let ret_ptr = ecx.memory.allocate(ecx.tcx.data_layout.pointer_size.bytes(), ecx.tcx.data_layout.pointer_align.abi(), MemoryKind::Stack)?;
+            cleanup_ptr = Some(ret_ptr);
+
+            // Push our stack frame
+            ecx.push_stack_frame(
+                start_instance,
+                start_mir.span,
+                start_mir,
+                Lvalue::from_ptr(ret_ptr),
+                StackPopCleanup::Tls(None),
+            )?;
+
+            let mut args = ecx.frame().mir.args_iter();
+
+            // First argument: pointer to main()
+            let main_ptr = ecx.memory.create_fn_alloc(main_instance);
+            let dest = ecx.eval_lvalue(&mir::Lvalue::Local(args.next().unwrap()))?;
+            let main_ty = main_instance.def.def_ty(ecx.tcx);
+            let main_ptr_ty = ecx.tcx.mk_fn_ptr(main_ty.fn_sig(ecx.tcx));
+            ecx.write_value(Value::ByVal(PrimVal::Ptr(main_ptr)), dest, main_ptr_ty)?;
+
+            // Second argument (argc): 0
+            let dest = ecx.eval_lvalue(&mir::Lvalue::Local(args.next().unwrap()))?;
+            let ty = ecx.tcx.types.isize;
+            ecx.write_null(dest, ty)?;
+
+            // Third argument (argv): 0
+            let dest = ecx.eval_lvalue(&mir::Lvalue::Local(args.next().unwrap()))?;
+            let ty = ecx.tcx.mk_imm_ptr(ecx.tcx.mk_imm_ptr(ecx.tcx.types.u8));
+            ecx.write_null(dest, ty)?;
+        } else {
+            ecx.push_stack_frame(
+                main_instance,
+                main_mir.span,
+                main_mir,
+                Lvalue::undef(),
+                StackPopCleanup::Tls(None),
+            )?;
+        }
+
+        while ecx.step()?
{} + if let Some(cleanup_ptr) = cleanup_ptr { + ecx.memory.deallocate(cleanup_ptr, None, MemoryKind::Stack)?; + } + return Ok(()); + } + + let mut ecx = EvalContext::new(tcx, limits); + match run_main(&mut ecx, main_id, start_wrapper) { + Ok(()) => { + let leaks = ecx.memory.leak_report(); + if leaks != 0 { + tcx.sess.err("the evaluated program leaked memory"); + } + } + Err(e) => { + report(tcx, &ecx, &e); + } + } +} + +fn report(tcx: TyCtxt, ecx: &EvalContext, e: &EvalError) { + if let Some(frame) = ecx.stack().last() { + let block = &frame.mir.basic_blocks()[frame.block]; + let span = if frame.stmt < block.statements.len() { + block.statements[frame.stmt].source_info.span + } else { + block.terminator().source_info.span + }; + let mut err = tcx.sess.struct_span_err(span, &e.to_string()); + for &Frame { instance, span, .. } in ecx.stack().iter().rev() { + if tcx.def_key(instance.def_id()).disambiguated_data.data == DefPathData::ClosureExpr { + err.span_note(span, "inside call to closure"); + continue; + } + err.span_note(span, &format!("inside call to {}", instance)); + } + err.emit(); + } else { + tcx.sess.err(&e.to_string()); + } +} + +// TODO(solson): Upstream these methods into rustc::ty::layout. + +pub(super) trait IntegerExt { + fn size(self) -> Size; +} + +impl IntegerExt for layout::Integer { + fn size(self) -> Size { + use rustc::ty::layout::Integer::*; + match self { + I1 | I8 => Size::from_bits(8), + I16 => Size::from_bits(16), + I32 => Size::from_bits(32), + I64 => Size::from_bits(64), + I128 => Size::from_bits(128), + } + } +} + + +pub fn monomorphize_field_ty<'a, 'tcx:'a >(tcx: TyCtxt<'a, 'tcx, 'tcx>, f: &ty::FieldDef, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + let substituted = f.ty(tcx, substs); + tcx.normalize_associated_type(&substituted) +} + +pub fn is_inhabited<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.uninhabited_from(&mut HashMap::default(), tcx).is_empty() +} + +pub trait IntoValTyPair<'tcx> { + fn into_val_ty_pair<'a>(self, ecx: &mut EvalContext<'a, 'tcx>) -> EvalResult<'tcx, (Value, Ty<'tcx>)> where 'tcx: 'a; +} + +impl<'tcx> IntoValTyPair<'tcx> for (Value, Ty<'tcx>) { + fn into_val_ty_pair<'a>(self, _: &mut EvalContext<'a, 'tcx>) -> EvalResult<'tcx, (Value, Ty<'tcx>)> where 'tcx: 'a { + Ok(self) + } +} + +impl<'b, 'tcx: 'b> IntoValTyPair<'tcx> for &'b mir::Operand<'tcx> { + fn into_val_ty_pair<'a>(self, ecx: &mut EvalContext<'a, 'tcx>) -> EvalResult<'tcx, (Value, Ty<'tcx>)> where 'tcx: 'a { + let value = ecx.eval_operand(self)?; + let value_ty = ecx.operand_ty(self); + Ok((value, value_ty)) + } +} + + +/// FIXME: expose trans::monomorphize::resolve_closure +pub fn resolve_closure<'a, 'tcx> ( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + substs: ty::ClosureSubsts<'tcx>, + requested_kind: ty::ClosureKind, +) -> ty::Instance<'tcx> { + let actual_kind = tcx.closure_kind(def_id); + match needs_fn_once_adapter_shim(actual_kind, requested_kind) { + Ok(true) => fn_once_adapter_instance(tcx, def_id, substs), + _ => ty::Instance::new(def_id, substs.substs) + } +} + +fn fn_once_adapter_instance<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + closure_did: DefId, + substs: ty::ClosureSubsts<'tcx>, +) -> ty::Instance<'tcx> { + debug!("fn_once_adapter_shim({:?}, {:?})", + closure_did, + substs); + let fn_once = tcx.lang_items.fn_once_trait().unwrap(); + let call_once = tcx.associated_items(fn_once) + .find(|it| it.kind == ty::AssociatedKind::Method) + .unwrap().def_id; + let def = ty::InstanceDef::ClosureOnceShim { call_once }; + + let self_ty = 
tcx.mk_closure_from_closure_substs(
+        closure_did, substs);
+
+    let sig = tcx.fn_sig(closure_did).subst(tcx, substs.substs);
+    let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
+    assert_eq!(sig.inputs().len(), 1);
+    let substs = tcx.mk_substs([
+        Kind::from(self_ty),
+        Kind::from(sig.inputs()[0]),
+    ].iter().cloned());
+
+    debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
+    ty::Instance { def, substs }
+}
+
+fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind,
+                              trait_closure_kind: ty::ClosureKind)
+                              -> Result<bool, ()>
+{
+    match (actual_closure_kind, trait_closure_kind) {
+        (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
+        (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
+        (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => {
+            // No adapter needed.
+            Ok(false)
+        }
+        (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
+            // The closure fn `llfn` is a `fn(&self, ...)`. We want a
+            // `fn(&mut self, ...)`. In fact, at trans time, these are
+            // basically the same thing, so we can just return llfn.
+            Ok(false)
+        }
+        (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
+        (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
+            // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
+            // self, ...)`. We want a `fn(self, ...)`. We can produce
+            // this by doing something like:
+            //
+            //     fn call_once(self, ...) { call_mut(&self, ...) }
+            //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
+            //
+            // These are both the same at trans time.
+            Ok(true)
+        }
+        _ => Err(()),
+    }
+}
+
+/// The point where linking happens. Resolve a (def_id, substs)
+/// pair to an instance.
+pub fn resolve<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    def_id: DefId,
+    substs: &'tcx Substs<'tcx>
+) -> ty::Instance<'tcx> {
+    debug!("resolve(def_id={:?}, substs={:?})",
+           def_id, substs);
+    let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) {
+        debug!(" => associated item, attempting to find impl");
+        let item = tcx.associated_item(def_id);
+        resolve_associated_item(tcx, &item, trait_def_id, substs)
+    } else {
+        let item_type = def_ty(tcx, def_id, substs);
+        let def = match item_type.sty {
+            ty::TyFnDef(..) if {
+                let f = item_type.fn_sig(tcx);
+                f.abi() == Abi::RustIntrinsic ||
+                f.abi() == Abi::PlatformIntrinsic
+            } =>
+            {
+                debug!(" => intrinsic");
+                ty::InstanceDef::Intrinsic(def_id)
+            }
+            _ => {
+                if Some(def_id) == tcx.lang_items.drop_in_place_fn() {
+                    let ty = substs.type_at(0);
+                    if needs_drop_glue(tcx, ty) {
+                        debug!(" => nontrivial drop glue");
+                        ty::InstanceDef::DropGlue(def_id, Some(ty))
+                    } else {
+                        debug!(" => trivial drop glue");
+                        ty::InstanceDef::DropGlue(def_id, None)
+                    }
+                } else {
+                    debug!(" => free item");
+                    ty::InstanceDef::Item(def_id)
+                }
+            }
+        };
+        ty::Instance { def, substs }
+    };
+    debug!("resolve(def_id={:?}, substs={:?}) = {}",
+           def_id, substs, result);
+    result
+}
+
+pub fn needs_drop_glue<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> bool {
+    assert!(t.is_normalized_for_trans());
+
+    let t = tcx.erase_regions(&t);
+
+    // FIXME (#22815): note that type_needs_drop conservatively
+    // approximates in some cases and may say a type expression
+    // requires drop glue when it actually does not.
+    //
+    // (In this case it is not clear whether any harm is done, i.e.
+    // erroneously returning `true` in some cases where we could have
+    // returned `false` does not appear unsound. The impact on
+    // code quality is unknown at this time.)
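// ---------------------------------------------------------------------------
// Illustrative sketch (editorial aside, not part of the patch): the question
// this function approximates is exposed on stable as `std::mem::needs_drop`,
// i.e. whether dropping a value of the type runs any code:
//
//     fn main() {
//         assert!(!std::mem::needs_drop::<i32>());
//         assert!(std::mem::needs_drop::<Box<i32>>());
//         assert!(std::mem::needs_drop::<Vec<String>>());
//     }
// ---------------------------------------------------------------------------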
+ + let env = ty::ParamEnv::empty(Reveal::All); + if !t.needs_drop(tcx, env) { + return false; + } + match t.sty { + ty::TyAdt(def, _) if def.is_box() => { + let typ = t.boxed_ty(); + if !typ.needs_drop(tcx, env) && type_is_sized(tcx, typ) { + let layout = t.layout(tcx, ty::ParamEnv::empty(Reveal::All)).unwrap(); + // `Box` does not allocate. + layout.size(&tcx.data_layout).bytes() != 0 + } else { + true + } + } + _ => true + } +} + +fn resolve_associated_item<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + trait_item: &ty::AssociatedItem, + trait_id: DefId, + rcvr_substs: &'tcx Substs<'tcx> +) -> ty::Instance<'tcx> { + let def_id = trait_item.def_id; + debug!("resolve_associated_item(trait_item={:?}, \ + trait_id={:?}, \ + rcvr_substs={:?})", + def_id, trait_id, rcvr_substs); + + let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs); + let vtbl = fulfill_obligation(tcx, DUMMY_SP, ty::Binder(trait_ref)); + + // Now that we know which impl is being used, we can dispatch to + // the actual function: + match vtbl { + ::rustc::traits::VtableImpl(impl_data) => { + let (def_id, substs) = ::rustc::traits::find_associated_item( + tcx, trait_item, rcvr_substs, &impl_data); + let substs = tcx.erase_regions(&substs); + ty::Instance::new(def_id, substs) + } + ::rustc::traits::VtableClosure(closure_data) => { + let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap(); + resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs, + trait_closure_kind) + } + ::rustc::traits::VtableFnPointer(ref data) => { + ty::Instance { + def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty), + substs: rcvr_substs + } + } + ::rustc::traits::VtableObject(ref data) => { + let index = tcx.get_vtable_index_of_object_method(data, def_id); + ty::Instance { + def: ty::InstanceDef::Virtual(def_id, index), + substs: rcvr_substs + } + } + _ => { + bug!("static call to invalid vtable: {:?}", vtbl) + } + } +} + +pub fn def_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>) + -> Ty<'tcx> +{ + let ty = tcx.type_of(def_id); + apply_param_substs(tcx, substs, &ty) +} + +/// Monomorphizes a type from the AST by first applying the in-scope +/// substitutions and then normalizing any associated types. 
+pub fn apply_param_substs<'a, 'tcx, T>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                                       param_substs: &Substs<'tcx>,
+                                       value: &T)
+                                       -> T
+    where T: ::rustc::infer::TransNormalize<'tcx>
+{
+    debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value);
+    let substituted = value.subst(tcx, param_substs);
+    let substituted = tcx.erase_regions(&substituted);
+    AssociatedTypeNormalizer{ tcx }.fold(&substituted)
+}
+
+
+struct AssociatedTypeNormalizer<'a, 'tcx: 'a> {
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+}
+
+impl<'a, 'tcx> AssociatedTypeNormalizer<'a, 'tcx> {
+    fn fold<T: ::rustc::ty::fold::TypeFoldable<'tcx>>(&mut self, value: &T) -> T {
+        if !value.has_projection_types() {
+            value.clone()
+        } else {
+            value.fold_with(self)
+        }
+    }
+}
+
+impl<'a, 'tcx> ::rustc::ty::fold::TypeFolder<'tcx, 'tcx> for AssociatedTypeNormalizer<'a, 'tcx> {
+    fn tcx<'c>(&'c self) -> TyCtxt<'c, 'tcx, 'tcx> {
+        self.tcx
+    }
+
+    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        if !ty.has_projection_types() {
+            ty
+        } else {
+            self.tcx.normalize_associated_type(&ty)
+        }
+    }
+}
+
+fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
+    // generics are weird, don't run this function on a generic
+    assert!(!ty.needs_subst());
+    ty.is_sized(tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP)
+}
+
+/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
+/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
+/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
+fn fulfill_obligation<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                                span: Span,
+                                trait_ref: ty::PolyTraitRef<'tcx>)
+                                -> traits::Vtable<'tcx, ()>
+{
+    // Remove any references to regions; this helps improve caching.
+    let trait_ref = tcx.erase_regions(&trait_ref);
+
+    debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
+           trait_ref, trait_ref.def_id());
+
+    // Do the initial selection for the obligation. This yields the
+    // shallow result we are looking for -- that is, what specific impl.
+    tcx.infer_ctxt().enter(|infcx| {
+        let mut selcx = traits::SelectionContext::new(&infcx);
+
+        let obligation_cause = traits::ObligationCause::misc(span,
+                                                             ast::DUMMY_NODE_ID);
+        let obligation = traits::Obligation::new(obligation_cause,
+                                                 ty::ParamEnv::empty(Reveal::All),
+                                                 trait_ref.to_poly_trait_predicate());
+
+        let selection = match selcx.select(&obligation) {
+            Ok(Some(selection)) => selection,
+            Ok(None) => {
+                // Ambiguity can happen when monomorphizing during trans
+                // expands to some humongo type that never occurred
+                // statically -- this humongo type can then overflow,
+                // leading to an ambiguous result. So report this as an
+                // overflow bug, since I believe this is the only case
+                // where ambiguity can result.
+                debug!("Encountered ambiguity selecting `{:?}` during trans, \
+                        presuming due to overflow",
+                       trait_ref);
+                tcx.sess.span_fatal(span,
+                    "reached the recursion limit during monomorphization \
+                     (selection ambiguity)");
+            }
+            Err(e) => {
+                span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
+                          e, trait_ref)
+            }
+        };
+
+        debug!("fulfill_obligation: selection={:?}", selection);
+
+        // Currently, we use a fulfillment context to completely resolve
+        // all nested obligations. This is because they can inform the
+        // inference of the impl's type parameters.
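// ---------------------------------------------------------------------------
// Illustrative sketch (editorial aside, not part of the patch): what
// "normalizing an associated type" (see `AssociatedTypeNormalizer` above)
// means at the source level -- the projection on the left reduces to the
// concrete type on the right:
//
//     fn main() {
//         let item: <Vec<i32> as IntoIterator>::Item = 5; // i.e. `i32`
//         let concrete: i32 = item;
//         assert_eq!(concrete, 5);
//     }
// ---------------------------------------------------------------------------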
+        let mut fulfill_cx = traits::FulfillmentContext::new();
+        let vtable = selection.map(|predicate| {
+            debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
+            fulfill_cx.register_predicate_obligation(&infcx, predicate);
+        });
+        let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
+
+        debug!("Cache miss: {:?} => {:?}", trait_ref, vtable);
+        vtable
+    })
+}
+
+pub fn resolve_drop_in_place<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    ty: Ty<'tcx>,
+) -> ty::Instance<'tcx>
+{
+    let def_id = tcx.require_lang_item(::rustc::middle::lang_items::DropInPlaceFnLangItem);
+    let substs = tcx.intern_substs(&[Kind::from(ty)]);
+    resolve(tcx, def_id, substs)
+}
diff --git a/src/librustc_mir/miri/lvalue.rs b/src/librustc_mir/miri/lvalue.rs
new file mode 100644
index 0000000000000..1435e75cde44f
--- /dev/null
+++ b/src/librustc_mir/miri/lvalue.rs
@@ -0,0 +1,463 @@
+use rustc::mir;
+use rustc::ty::layout::{Size, Align};
+use rustc::ty::{self, Ty};
+use rustc_data_structures::indexed_vec::Idx;
+use syntax::ast::Mutability;
+
+use super::error::{EvalError, EvalResult};
+use super::eval_context::EvalContext;
+use super::memory::MemoryPointer;
+use super::value::{PrimVal, Value, Pointer};
+
+#[derive(Copy, Clone, Debug)]
+pub enum Lvalue<'tcx> {
+    /// An lvalue referring to a value allocated in the `Memory` system.
+    Ptr {
+        /// An lvalue may have an invalid (integral or undef) pointer,
+        /// since it might be turned back into a reference
+        /// before ever being dereferenced.
+        ptr: Pointer,
+        extra: LvalueExtra,
+        /// Remember whether this lvalue is *supposed* to be aligned.
+        aligned: bool,
+    },
+
+    /// An lvalue referring to a value on the stack. Represented by a stack frame index paired with
+    /// a Mir local index.
+    Local {
+        frame: usize,
+        local: mir::Local,
+    },
+
+    /// An lvalue referring to a global
+    Global(GlobalId<'tcx>),
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum LvalueExtra {
+    None,
+    Length(u64),
+    Vtable(MemoryPointer),
+    DowncastVariant(usize),
+}
+
+/// Uniquely identifies a specific constant or static.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
+pub struct GlobalId<'tcx> {
+    /// For a constant or static, the `Instance` of the item itself.
+    /// For a promoted global, the `Instance` of the function they belong to.
+    pub(super) instance: ty::Instance<'tcx>,
+
+    /// The index for promoted globals within their function's `Mir`.
+    pub(super) promoted: Option<mir::Promoted>,
+}
+
+#[derive(Clone, Debug)]
+pub struct Global<'tcx> {
+    pub(super) value: Value,
+    /// Only used in `force_allocation` to ensure we don't mark the memory
+    /// before the static is initialized.
It is possible for a
+    /// global that starts out as `Value::ByVal(PrimVal::Undef)` to be
+    /// lifted to an allocation before the static is fully initialized.
+    pub(super) initialized: bool,
+    pub(super) mutable: Mutability,
+    pub(super) ty: Ty<'tcx>,
+}
+
+impl<'tcx> Lvalue<'tcx> {
+    /// Produces an Lvalue that will error if attempted to be read from
+    pub fn undef() -> Self {
+        Self::from_primval_ptr(PrimVal::Undef.into())
+    }
+
+    pub(crate) fn from_primval_ptr(ptr: Pointer) -> Self {
+        Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true }
+    }
+
+    pub(crate) fn from_ptr(ptr: MemoryPointer) -> Self {
+        Self::from_primval_ptr(ptr.into())
+    }
+
+    pub(super) fn to_ptr_extra_aligned(self) -> (Pointer, LvalueExtra, bool) {
+        match self {
+            Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
+            _ => bug!("to_ptr_extra_aligned: expected Lvalue::Ptr, got {:?}", self),
+        }
+    }
+
+    pub(super) fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
+        let (ptr, extra, _aligned) = self.to_ptr_extra_aligned();
+        // At this point, we forget about the alignment information -- the lvalue has been turned into a reference,
+        // and no matter where it came from, it now must be aligned.
+        assert_eq!(extra, LvalueExtra::None);
+        ptr.to_ptr()
+    }
+
+    pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
+        match ty.sty {
+            ty::TyArray(elem, n) => (elem, n as u64),
+
+            ty::TySlice(elem) => {
+                match self {
+                    Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => (elem, len),
+                    _ => bug!("elem_ty_and_len of a TySlice given non-slice lvalue: {:?}", self),
+                }
+            }
+
+            _ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty),
+        }
+    }
+}
+
+impl<'tcx> Global<'tcx> {
+    pub(super) fn uninitialized(ty: Ty<'tcx>) -> Self {
+        Global {
+            value: Value::ByVal(PrimVal::Undef),
+            mutable: Mutability::Mutable,
+            ty,
+            initialized: false,
+        }
+    }
+
+    pub(super) fn initialized(ty: Ty<'tcx>, value: Value, mutable: Mutability) -> Self {
+        Global {
+            value,
+            mutable,
+            ty,
+            initialized: true,
+        }
+    }
+}
+
+impl<'a, 'tcx> EvalContext<'a, 'tcx> {
+    /// Reads a value from the lvalue without going through the intermediate step of obtaining
+    /// a `miri::Lvalue`
+    pub fn try_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Option<Value>> {
+        use rustc::mir::Lvalue::*;
+        match *lvalue {
+            // Might allow this in the future, right now there's no way to do this from Rust code anyway
+            Local(mir::RETURN_POINTER) => Err(EvalError::ReadFromReturnPointer),
+            // Directly reading a local will always succeed
+            Local(local) => self.frame().get_local(local).map(Some),
+            // Directly reading a static will always succeed
+            Static(ref static_) => {
+                let instance = ty::Instance::mono(self.tcx, static_.def_id);
+                let cid = GlobalId { instance, promoted: None };
+                Ok(Some(self.globals.get(&cid).expect("global not cached").value))
+            },
+            Projection(ref proj) => self.try_read_lvalue_projection(proj),
+        }
+    }
+
+    fn try_read_lvalue_projection(&mut self, proj: &mir::LvalueProjection<'tcx>) -> EvalResult<'tcx, Option<Value>> {
+        use rustc::mir::ProjectionElem::*;
+        let base = match self.try_read_lvalue(&proj.base)? {
+            Some(base) => base,
+            None => return Ok(None),
+        };
+        let base_ty = self.lvalue_ty(&proj.base);
+        match proj.elem {
+            Field(field, _) => match (field.index(), base) {
+                // the only field of a struct
+                (0, Value::ByVal(val)) => Ok(Some(Value::ByVal(val))),
+                // split fat pointers, 2 element tuples, ...
+                (0...1, Value::ByValPair(a, b)) if self.get_field_count(base_ty)?
== 2 => { + let val = [a, b][field.index()]; + Ok(Some(Value::ByVal(val))) + }, + // the only field of a struct is a fat pointer + (0, Value::ByValPair(..)) => Ok(Some(base)), + _ => Ok(None), + }, + // The NullablePointer cases should work fine, need to take care for normal enums + Downcast(..) | + Subslice { .. } | + // reading index 0 or index 1 from a ByVal or ByVal pair could be optimized + ConstantIndex { .. } | Index(_) | + // No way to optimize this projection any better than the normal lvalue path + Deref => Ok(None), + } + } + + /// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses. + pub(super) fn eval_and_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Value> { + let ty = self.lvalue_ty(lvalue); + // Shortcut for things like accessing a fat pointer's field, + // which would otherwise (in the `eval_lvalue` path) require moving a `ByValPair` to memory + // and returning an `Lvalue::Ptr` to it + if let Some(val) = self.try_read_lvalue(lvalue)? { + return Ok(val); + } + let lvalue = self.eval_lvalue(lvalue)?; + self.read_lvalue(lvalue, ty) + } + + pub fn read_lvalue(&self, lvalue: Lvalue<'tcx>, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { + if ty.is_never() { + return Err(EvalError::Unreachable); + } + + match lvalue { + Lvalue::Ptr { ptr, extra, aligned } => { + assert_eq!(extra, LvalueExtra::None); + Ok(Value::ByRef(ptr, aligned)) + } + Lvalue::Local { frame, local } => { + self.stack[frame].get_local(local) + } + Lvalue::Global(cid) => { + Ok(self.globals.get(&cid).expect("global not cached").value) + } + } + } + + pub(super) fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> { + use rustc::mir::Lvalue::*; + let lvalue = match *mir_lvalue { + Local(mir::RETURN_POINTER) => self.frame().return_lvalue, + Local(local) => Lvalue::Local { frame: self.stack.len() - 1, local }, + + Static(ref static_) => { + let instance = ty::Instance::mono(self.tcx, static_.def_id); + Lvalue::Global(GlobalId { instance, promoted: None }) + } + + Projection(ref proj) => { + let ty = self.lvalue_ty(&proj.base); + let lvalue = self.eval_lvalue(&proj.base)?; + return self.eval_lvalue_projection(lvalue, ty, &proj.elem); + } + }; + + if log_enabled!(::log::LogLevel::Trace) { + self.dump_local(lvalue); + } + + Ok(lvalue) + } + + pub fn lvalue_field( + &mut self, + base: Lvalue<'tcx>, + field_index: usize, + base_ty: Ty<'tcx>, + field_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Lvalue<'tcx>> { + let base_layout = self.type_layout(base_ty)?; + use rustc::ty::layout::Layout::*; + let (offset, packed) = match *base_layout { + Univariant { ref variant, .. } => { + (variant.offsets[field_index], variant.packed) + }, + + General { ref variants, .. } => { + let (_, base_extra, _) = base.to_ptr_extra_aligned(); + if let LvalueExtra::DowncastVariant(variant_idx) = base_extra { + // +1 for the discriminant, which is field 0 + (variants[variant_idx].offsets[field_index + 1], variants[variant_idx].packed) + } else { + bug!("field access on enum had no variant index"); + } + } + + RawNullablePointer { .. } => { + assert_eq!(field_index, 0); + return Ok(base); + } + + StructWrappedNullablePointer { ref nonnull, .. } => { + (nonnull.offsets[field_index], nonnull.packed) + } + + UntaggedUnion { .. 
} => return Ok(base), + + Vector { element, count } => { + let field = field_index as u64; + assert!(field < count); + let elem_size = element.size(&self.tcx.data_layout).bytes(); + (Size::from_bytes(field * elem_size), false) + } + + // We treat arrays + fixed sized indexing like field accesses + Array { .. } => { + let field = field_index as u64; + let elem_size = match base_ty.sty { + ty::TyArray(elem_ty, n) => { + assert!(field < n as u64); + self.type_size(elem_ty)?.expect("array elements are sized") as u64 + }, + _ => bug!("lvalue_field: got Array layout but non-array type {:?}", base_ty), + }; + (Size::from_bytes(field * elem_size), false) + } + + FatPointer { .. } => { + let bytes = field_index as u64 * self.memory.pointer_size(); + let offset = Size::from_bytes(bytes); + (offset, false) + } + + _ => bug!("field access on non-product type: {:?}", base_layout), + }; + + // Do not allocate in trivial cases + let (base_ptr, base_extra, aligned) = match base { + Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned), + Lvalue::Local { frame, local } => match self.stack[frame].get_local(local)? { + // in case the type has a single field, just return the value + Value::ByVal(_) if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => { + assert_eq!(offset.bytes(), 0, "ByVal can only have 1 non zst field with offset 0"); + return Ok(base); + }, + Value::ByRef(..) | + Value::ByValPair(..) | + Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(), + }, + Lvalue::Global(cid) => match self.globals.get(&cid).expect("uncached global").value { + // in case the type has a single field, just return the value + Value::ByVal(_) if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => { + assert_eq!(offset.bytes(), 0, "ByVal can only have 1 non zst field with offset 0"); + return Ok(base); + }, + Value::ByRef(..) | + Value::ByValPair(..) | + Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(), + }, + }; + + let offset = match base_extra { + LvalueExtra::Vtable(tab) => { + let (_, align) = self.size_and_align_of_dst(base_ty, base_ptr.to_value_with_vtable(tab))?; + offset.abi_align(Align::from_bytes(align, align).unwrap()).bytes() + } + _ => offset.bytes(), + }; + + let ptr = base_ptr.offset(offset, self.memory.layout)?; + + let field_ty = self.monomorphize(field_ty, self.substs()); + + let extra = if self.type_is_sized(field_ty) { + LvalueExtra::None + } else { + match base_extra { + LvalueExtra::None => bug!("expected fat pointer"), + LvalueExtra::DowncastVariant(..) => + bug!("Rust doesn't support unsized fields in enum variants"), + LvalueExtra::Vtable(_) | + LvalueExtra::Length(_) => {}, + } + base_extra + }; + + Ok(Lvalue::Ptr { ptr, extra, aligned: aligned && !packed }) + } + + fn eval_lvalue_projection( + &mut self, + base: Lvalue<'tcx>, + base_ty: Ty<'tcx>, + proj_elem: &mir::ProjectionElem<'tcx, mir::Operand<'tcx>>, + ) -> EvalResult<'tcx, Lvalue<'tcx>> { + use rustc::mir::ProjectionElem::*; + let (ptr, extra, aligned) = match *proj_elem { + Field(field, field_ty) => { + return self.lvalue_field(base, field.index(), base_ty, field_ty); + } + + Downcast(_, variant) => { + let base_layout = self.type_layout(base_ty)?; + // FIXME(solson) + let base = self.force_allocation(base)?; + let (base_ptr, base_extra, aligned) = base.to_ptr_extra_aligned(); + + use rustc::ty::layout::Layout::*; + let extra = match *base_layout { + General { .. } => LvalueExtra::DowncastVariant(variant), + RawNullablePointer { .. 
} | StructWrappedNullablePointer { .. } => base_extra, + _ => bug!("variant downcast on non-aggregate: {:?}", base_layout), + }; + (base_ptr, extra, aligned) + } + + Deref => { + let val = self.read_lvalue(base, base_ty)?; + + let pointee_type = match base_ty.sty { + ty::TyRawPtr(ref tam) | + ty::TyRef(_, ref tam) => tam.ty, + ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(), + _ => bug!("can only deref pointer types"), + }; + + trace!("deref to {} on {:?}", pointee_type, val); + + match self.tcx.struct_tail(pointee_type).sty { + ty::TyDynamic(..) => { + let (ptr, vtable) = val.into_ptr_vtable_pair(&mut self.memory)?; + (ptr, LvalueExtra::Vtable(vtable), true) + }, + ty::TyStr | ty::TySlice(_) => { + let (ptr, len) = val.into_slice(&mut self.memory)?; + (ptr, LvalueExtra::Length(len), true) + }, + _ => (val.into_ptr(&mut self.memory)?, LvalueExtra::None, true), + } + } + + Index(ref operand) => { + // FIXME(solson) + let base = self.force_allocation(base)?; + let (base_ptr, _, aligned) = base.to_ptr_extra_aligned(); + + let (elem_ty, len) = base.elem_ty_and_len(base_ty); + let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized"); + let n_ptr = self.eval_operand(operand)?; + let usize = self.tcx.types.usize; + let n = self.value_to_primval(n_ptr, usize)?.to_u64()?; + assert!(n < len, "Tried to access element {} of array/slice with length {}", n, len); + let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?; + (ptr, LvalueExtra::None, aligned) + } + + ConstantIndex { offset, min_length, from_end } => { + // FIXME(solson) + let base = self.force_allocation(base)?; + let (base_ptr, _, aligned) = base.to_ptr_extra_aligned(); + + let (elem_ty, n) = base.elem_ty_and_len(base_ty); + let elem_size = self.type_size(elem_ty)?.expect("sequence element must be sized"); + assert!(n >= min_length as u64); + + let index = if from_end { + n - u64::from(offset) + } else { + u64::from(offset) + }; + + let ptr = base_ptr.offset(index * elem_size, self.memory.layout)?; + (ptr, LvalueExtra::None, aligned) + } + + Subslice { from, to } => { + // FIXME(solson) + let base = self.force_allocation(base)?; + let (base_ptr, _, aligned) = base.to_ptr_extra_aligned(); + + let (elem_ty, n) = base.elem_ty_and_len(base_ty); + let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized"); + assert!(u64::from(from) <= n - u64::from(to)); + let ptr = base_ptr.offset(u64::from(from) * elem_size, self.memory.layout)?; + let extra = LvalueExtra::Length(n - u64::from(to) - u64::from(from)); + (ptr, extra, aligned) + } + }; + + Ok(Lvalue::Ptr { ptr, extra, aligned }) + } + + pub(super) fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> { + self.monomorphize(lvalue.ty(self.mir(), self.tcx).to_ty(self.tcx), self.substs()) + } +} diff --git a/src/librustc_mir/miri/memory.rs b/src/librustc_mir/miri/memory.rs new file mode 100644 index 0000000000000..fd07ade025e96 --- /dev/null +++ b/src/librustc_mir/miri/memory.rs @@ -0,0 +1,1148 @@ +use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian}; +use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque}; +use std::{fmt, iter, ptr, mem, io}; + +use rustc::ty; +use rustc::ty::layout::{self, TargetDataLayout}; +use syntax::ast::Mutability; + +use super::error::{EvalError, EvalResult}; +use super::value::{PrimVal, self, Pointer}; +use super::eval_context::EvalContext; + +//////////////////////////////////////////////////////////////////////////////// +// Allocations and pointers 
+////////////////////////////////////////////////////////////////////////////////
+
+#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct AllocId(pub u64);
+
+impl fmt::Display for AllocId {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+#[derive(Debug)]
+pub struct Allocation {
+    /// The actual bytes of the allocation.
+    /// Note that the bytes of a pointer represent the offset of the pointer
+    pub bytes: Vec<u8>,
+    /// Maps from byte addresses to allocations.
+    /// Only the first byte of a pointer is inserted into the map.
+    pub relocations: BTreeMap<u64, AllocId>,
+    /// Denotes undefined memory. Reading from undefined memory is forbidden in miri
+    pub undef_mask: UndefMask,
+    /// The alignment of the allocation to detect unaligned reads.
+    pub align: u64,
+    /// Whether the allocation may be modified.
+    pub mutable: Mutability,
+    /// Use the `mark_static_initalized` method of `Memory` to ensure that an error occurs if the memory of this
+    /// allocation is modified or deallocated in the future.
+    /// Helps guarantee that stack allocations aren't deallocated via `rust_deallocate`
+    pub kind: Kind,
+}
+
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum Kind {
+    /// Error if deallocated any other way than `rust_deallocate`
+    Rust,
+    /// Error if deallocated any other way than `free`
+    C,
+    /// Error if deallocated except during a stack pop
+    Stack,
+    /// Static in the process of being initialized.
+    /// The difference is important: An immutable static referring to a
+    /// mutable initialized static will freeze immutably and would not
+    /// be able to distinguish already initialized statics from uninitialized ones
+    UninitializedStatic,
+    /// May never be deallocated
+    Static,
+    /// Part of env var emulation
+    Env,
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct MemoryPointer {
+    pub alloc_id: AllocId,
+    pub offset: u64,
+}
+
+impl MemoryPointer {
+    pub fn new(alloc_id: AllocId, offset: u64) -> Self {
+        MemoryPointer { alloc_id, offset }
+    }
+
+    pub fn wrapping_signed_offset<'tcx>(self, i: i64, layout: &TargetDataLayout) -> Self {
+        MemoryPointer::new(self.alloc_id, value::wrapping_signed_offset(self.offset, i, layout))
+    }
+
+    pub fn overflowing_signed_offset<'tcx>(self, i: i128, layout: &TargetDataLayout) -> (Self, bool) {
+        let (res, over) = value::overflowing_signed_offset(self.offset, i, layout);
+        (MemoryPointer::new(self.alloc_id, res), over)
+    }
+
+    pub fn signed_offset<'tcx>(self, i: i64, layout: &TargetDataLayout) -> EvalResult<'tcx, Self> {
+        Ok(MemoryPointer::new(self.alloc_id, value::signed_offset(self.offset, i, layout)?))
+    }
+
+    pub fn overflowing_offset<'tcx>(self, i: u64, layout: &TargetDataLayout) -> (Self, bool) {
+        let (res, over) = value::overflowing_offset(self.offset, i, layout);
+        (MemoryPointer::new(self.alloc_id, res), over)
+    }
+
+    pub fn offset<'tcx>(self, i: u64, layout: &TargetDataLayout) -> EvalResult<'tcx, Self> {
+        Ok(MemoryPointer::new(self.alloc_id, value::offset(self.offset, i, layout)?))
+    }
+}
+
+pub type TlsKey = usize;
+
+#[derive(Copy, Clone, Debug)]
+pub struct TlsEntry<'tcx> {
+    data: Pointer, // Will eventually become a map from thread IDs to `Pointer`s, if we ever support more than one thread.
+    dtor: Option<ty::Instance<'tcx>>,
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Top-level interpreter memory
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct Memory<'a, 'tcx> {
+    /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
+    alloc_map: HashMap<AllocId, Allocation>,
+
+    /// The AllocId to assign to the next new allocation. Always incremented, never gets smaller.
+    next_id: AllocId,
+
+    /// Set of statics, constants, promoteds, vtables, ... to prevent `mark_static_initalized` from
+    /// stepping out of its own allocations. This set only contains statics backed by an
+    /// allocation. If they are ByVal or ByValPair they are not here, but will be inserted once
+    /// they become ByRef.
+    static_alloc: HashSet<AllocId>,
+
+    /// Number of virtual bytes allocated.
+    memory_usage: u64,
+
+    /// Maximum number of virtual bytes that may be allocated.
+    memory_size: u64,
+
+    /// Function "allocations". They exist solely so pointers have something to point to, and
+    /// we can figure out what they point to.
+    functions: HashMap<AllocId, ty::Instance<'tcx>>,
+
+    /// Inverse map of `functions` so we don't allocate a new pointer every time we need one
+    function_alloc_cache: HashMap<ty::Instance<'tcx>, AllocId>,
+
+    /// Target machine data layout to emulate.
+    pub layout: &'a TargetDataLayout,
+
+    /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate
+    /// allocations for string and bytestring literals.
+    literal_alloc_cache: HashMap<Vec<u8>, AllocId>,
+
+    /// pthreads-style thread-local storage.
+    thread_local: BTreeMap<TlsKey, TlsEntry<'tcx>>,
+
+    /// The Key to use for the next thread-local allocation.
+    next_thread_local: TlsKey,
+
+    /// To avoid having to pass flags to every single memory access, we have some global state saying whether
+    /// alignment checking is currently enforced for read and/or write accesses.
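+    /// These flags are toggled through the `HasMemory::read_maybe_aligned` and
+    /// `HasMemory::write_maybe_aligned` helpers at the bottom of this file.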
+    reads_are_aligned: bool,
+    writes_are_aligned: bool,
+}
+
+impl<'a, 'tcx> Memory<'a, 'tcx> {
+    pub fn new(layout: &'a TargetDataLayout, max_memory: u64) -> Self {
+        Memory {
+            alloc_map: HashMap::new(),
+            functions: HashMap::new(),
+            function_alloc_cache: HashMap::new(),
+            next_id: AllocId(0),
+            layout,
+            memory_size: max_memory,
+            memory_usage: 0,
+            static_alloc: HashSet::new(),
+            literal_alloc_cache: HashMap::new(),
+            thread_local: BTreeMap::new(),
+            next_thread_local: 0,
+            reads_are_aligned: true,
+            writes_are_aligned: true,
+        }
+    }
+
+    pub fn allocations(&self) -> ::std::collections::hash_map::Iter<AllocId, Allocation> {
+        self.alloc_map.iter()
+    }
+
+    pub fn create_fn_alloc(&mut self, instance: ty::Instance<'tcx>) -> MemoryPointer {
+        if let Some(&alloc_id) = self.function_alloc_cache.get(&instance) {
+            return MemoryPointer::new(alloc_id, 0);
+        }
+        let id = self.next_id;
+        debug!("creating fn ptr: {}", id);
+        self.next_id.0 += 1;
+        self.functions.insert(id, instance);
+        self.function_alloc_cache.insert(instance, id);
+        MemoryPointer::new(id, 0)
+    }
+
+    pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointer> {
+        if let Some(&alloc_id) = self.literal_alloc_cache.get(bytes) {
+            return Ok(MemoryPointer::new(alloc_id, 0));
+        }
+
+        let ptr = self.allocate(bytes.len() as u64, 1, Kind::UninitializedStatic)?;
+        self.write_bytes(PrimVal::Ptr(ptr), bytes)?;
+        self.mark_static_initalized(ptr.alloc_id, Mutability::Immutable)?;
+        self.literal_alloc_cache.insert(bytes.to_vec(), ptr.alloc_id);
+        Ok(ptr)
+    }
+
+    pub fn allocate(&mut self, size: u64, align: u64, kind: Kind) -> EvalResult<'tcx, MemoryPointer> {
+        assert_ne!(align, 0);
+        assert!(align.is_power_of_two());
+
+        if self.memory_size - self.memory_usage < size {
+            return Err(EvalError::OutOfMemory {
+                allocation_size: size,
+                memory_size: self.memory_size,
+                memory_usage: self.memory_usage,
+            });
+        }
+        self.memory_usage += size;
+        assert_eq!(size as usize as u64, size);
+        let alloc = Allocation {
+            bytes: vec![0; size as usize],
+            relocations: BTreeMap::new(),
+            undef_mask: UndefMask::new(size),
+            align,
+            kind,
+            mutable: Mutability::Mutable,
+        };
+        let id = self.next_id;
+        self.next_id.0 += 1;
+        self.alloc_map.insert(id, alloc);
+        Ok(MemoryPointer::new(id, 0))
+    }
+
+    pub fn reallocate(&mut self, ptr: MemoryPointer, old_size: u64, old_align: u64, new_size: u64, new_align: u64, kind: Kind) -> EvalResult<'tcx, MemoryPointer> {
+        use std::cmp::min;
+
+        if ptr.offset != 0 {
+            return Err(EvalError::ReallocateNonBasePtr);
+        }
+        if let Ok(alloc) = self.get(ptr.alloc_id) {
+            if alloc.kind != kind {
+                return Err(EvalError::ReallocatedWrongMemoryKind(alloc.kind, kind));
+            }
+        }
+
+        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc"
+        let new_ptr = self.allocate(new_size, new_align, kind)?;
+        self.copy(ptr.into(), new_ptr.into(), min(old_size, new_size), min(old_align, new_align), /*nonoverlapping*/true)?;
+        self.deallocate(ptr, Some((old_size, old_align)), kind)?;
+
+        Ok(new_ptr)
+    }
+
+    pub fn deallocate(&mut self, ptr: MemoryPointer, size_and_align: Option<(u64, u64)>, kind: Kind) -> EvalResult<'tcx> {
+        if ptr.offset != 0 {
+            return Err(EvalError::DeallocateNonBasePtr);
+        }
+
+        let alloc = match self.alloc_map.remove(&ptr.alloc_id) {
+            Some(alloc) => alloc,
+            None => return Err(EvalError::DoubleFree),
+        };
+
+        if alloc.kind != kind {
+            return Err(EvalError::DeallocatedWrongMemoryKind(alloc.kind, kind));
+        }
+        if let Some((size, align)) = size_and_align {
+            if size != alloc.bytes.len() as u64 || align != alloc.align {
+                return Err(EvalError::IncorrectAllocationInformation);
+            }
+        }
+
+        self.memory_usage -= alloc.bytes.len() as u64;
+        debug!("deallocated: {}", ptr.alloc_id);
+
+        Ok(())
+    }
+
+    pub fn pointer_size(&self) -> u64 {
+        self.layout.pointer_size.bytes()
+    }
+
+    pub fn endianess(&self) -> layout::Endian {
+        self.layout.endian
+    }
+
+    pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx> {
+        let offset = match ptr.into_inner_primval() {
+            PrimVal::Ptr(ptr) => {
+                let alloc = self.get(ptr.alloc_id)?;
+                if alloc.align < align {
+                    return Err(EvalError::AlignmentCheckFailed {
+                        has: alloc.align,
+                        required: align,
+                    });
+                }
+                ptr.offset
+            },
+            PrimVal::Bytes(bytes) => {
+                let v = ((bytes as u128) % (1 << self.pointer_size())) as u64;
+                if v == 0 {
+                    return Err(EvalError::InvalidNullPointerUsage);
+                }
+                v
+            },
+            PrimVal::Undef => return Err(EvalError::ReadUndefBytes),
+        };
+        if offset % align == 0 {
+            Ok(())
+        } else {
+            Err(EvalError::AlignmentCheckFailed {
+                has: offset % align,
+                required: align,
+            })
+        }
+    }
+
+    pub(crate) fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> {
+        let alloc = self.get(ptr.alloc_id)?;
+        let allocation_size = alloc.bytes.len() as u64;
+        if ptr.offset > allocation_size {
+            return Err(EvalError::PointerOutOfBounds { ptr, access, allocation_size });
+        }
+        Ok(())
+    }
+
+    pub(crate) fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey {
+        let new_key = self.next_thread_local;
+        self.next_thread_local += 1;
+        self.thread_local.insert(new_key, TlsEntry { data: Pointer::null(), dtor });
+        trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor);
+        return new_key;
+    }
+
+    pub(crate) fn delete_tls_key(&mut self, key: TlsKey) -> EvalResult<'tcx> {
+        return match self.thread_local.remove(&key) {
+            Some(_) => {
+                trace!("TLS key {} removed", key);
+                Ok(())
+            },
+            None => Err(EvalError::TlsOutOfBounds)
+        }
+    }
+
+    pub(crate) fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Pointer> {
+        return match self.thread_local.get(&key) {
+            Some(&TlsEntry { data, .. }) => {
+                trace!("TLS key {} loaded: {:?}", key, data);
+                Ok(data)
+            },
+            None => Err(EvalError::TlsOutOfBounds)
+        }
+    }
+
+    pub(crate) fn store_tls(&mut self, key: TlsKey, new_data: Pointer) -> EvalResult<'tcx> {
+        return match self.thread_local.get_mut(&key) {
+            Some(&mut TlsEntry { ref mut data, .. }) => {
+                trace!("TLS key {} stored: {:?}", key, new_data);
+                *data = new_data;
+                Ok(())
+            },
+            None => Err(EvalError::TlsOutOfBounds)
+        }
+    }
+
+    /// Returns a dtor, its argument and its index, if one is supposed to run
+    ///
+    /// An optional destructor function may be associated with each key value.
+    /// At thread exit, if a key value has a non-NULL destructor pointer,
+    /// and the thread has a non-NULL value associated with that key,
+    /// the value of the key is set to NULL, and then the function pointed
+    /// to is called with the previously associated value as its sole argument.
+    /// The order of destructor calls is unspecified if more than one destructor
+    /// exists for a thread when it exits.
+    ///
+    /// If, after all the destructors have been called for all non-NULL values
+    /// with associated destructors, there are still some non-NULL values with
+    /// associated destructors, then the process is repeated.
+    /// If, after at least {PTHREAD_DESTRUCTOR_ITERATIONS} iterations of destructor
+    /// calls for outstanding non-NULL values, there are still some non-NULL values
+    /// with associated destructors, implementations may stop calling destructors,
+    /// or they may continue calling destructors until no non-NULL values with
+    /// associated destructors exist, even though this might result in an infinite loop.
+    pub(crate) fn fetch_tls_dtor(&mut self, key: Option<TlsKey>) -> EvalResult<'tcx, Option<(ty::Instance<'tcx>, Pointer, TlsKey)>> {
+        use std::collections::Bound::*;
+        let start = match key {
+            Some(key) => Excluded(key),
+            None => Unbounded,
+        };
+        for (&key, &mut TlsEntry { ref mut data, dtor }) in self.thread_local.range_mut((start, Unbounded)) {
+            if !data.is_null()? {
+                if let Some(dtor) = dtor {
+                    let ret = Some((dtor, *data, key));
+                    *data = Pointer::null();
+                    return Ok(ret);
+                }
+            }
+        }
+        return Ok(None);
+    }
+}
+
+/// Allocation accessors
+impl<'a, 'tcx> Memory<'a, 'tcx> {
+    pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
+        match self.alloc_map.get(&id) {
+            Some(alloc) => Ok(alloc),
+            None => match self.functions.get(&id) {
+                Some(_) => Err(EvalError::DerefFunctionPointer),
+                None => Err(EvalError::DanglingPointerDeref),
+            }
+        }
+    }
+
+    pub fn get_mut(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation> {
+        match self.alloc_map.get_mut(&id) {
+            Some(alloc) => if alloc.mutable == Mutability::Mutable {
+                Ok(alloc)
+            } else {
+                Err(EvalError::ModifiedConstantMemory)
+            },
+            None => match self.functions.get(&id) {
+                Some(_) => Err(EvalError::DerefFunctionPointer),
+                None => Err(EvalError::DanglingPointerDeref),
+            }
+        }
+    }
+
+    pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, ty::Instance<'tcx>> {
+        if ptr.offset != 0 {
+            return Err(EvalError::InvalidFunctionPointer);
+        }
+        debug!("reading fn ptr: {}", ptr.alloc_id);
+        match self.functions.get(&ptr.alloc_id) {
+            Some(&fndef) => Ok(fndef),
+            None => match self.alloc_map.get(&ptr.alloc_id) {
+                Some(_) => Err(EvalError::ExecuteMemory),
+                None => Err(EvalError::InvalidFunctionPointer),
+            }
+        }
+    }
+
+    /// For debugging, print an allocation and all allocations it points to, recursively.
+    pub fn dump_alloc(&self, id: AllocId) {
+        self.dump_allocs(vec![id]);
+    }
+
+    /// For debugging, print a list of allocations and all allocations they point to, recursively.
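+    /// E.g. (illustrative sketch, with made-up allocation ids):
+    /// `self.memory.dump_allocs(vec![AllocId(0), AllocId(1)]);`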
+    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
+        use std::fmt::Write;
+        allocs.sort();
+        allocs.dedup();
+        let mut allocs_to_print = VecDeque::from(allocs);
+        let mut allocs_seen = HashSet::new();
+
+        while let Some(id) = allocs_to_print.pop_front() {
+            let mut msg = format!("Alloc {:<5} ", format!("{}:", id));
+            let prefix_len = msg.len();
+            let mut relocations = vec![];
+
+            let alloc = match (self.alloc_map.get(&id), self.functions.get(&id)) {
+                (Some(a), None) => a,
+                (None, Some(instance)) => {
+                    trace!("{} {}", msg, instance);
+                    continue;
+                },
+                (None, None) => {
+                    trace!("{} (deallocated)", msg);
+                    continue;
+                },
+                (Some(_), Some(_)) => bug!("miri invariant broken: an allocation id exists that points to both a function and a memory location"),
+            };
+
+            for i in 0..(alloc.bytes.len() as u64) {
+                if let Some(&target_id) = alloc.relocations.get(&i) {
+                    if allocs_seen.insert(target_id) {
+                        allocs_to_print.push_back(target_id);
+                    }
+                    relocations.push((i, target_id));
+                }
+                if alloc.undef_mask.is_range_defined(i, i + 1) {
+                    // this `as usize` is fine, since `i` came from a `usize`
+                    write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap();
+                } else {
+                    msg.push_str("__ ");
+                }
+            }
+
+            let immutable = match (alloc.kind, alloc.mutable) {
+                (Kind::UninitializedStatic, _) => " (static in the process of initialization)",
+                (Kind::Static, Mutability::Mutable) => " (static mut)",
+                (Kind::Static, Mutability::Immutable) => " (immutable)",
+                (Kind::Env, _) => " (env var)",
+                (Kind::C, _) => " (malloc)",
+                (Kind::Rust, _) => " (heap)",
+                (Kind::Stack, _) => " (stack)",
+            };
+            trace!("{}({} bytes, alignment {}){}", msg, alloc.bytes.len(), alloc.align, immutable);
+
+            if !relocations.is_empty() {
+                msg.clear();
+                write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
+                let mut pos = 0;
+                let relocation_width = (self.pointer_size() - 1) * 3;
+                for (i, target_id) in relocations {
+                    // this `as usize` is fine, since we can't print more chars than `usize::MAX`
+                    write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap();
+                    let target = format!("({})", target_id);
+                    // this `as usize` is fine, since we can't print more chars than `usize::MAX`
+                    write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
+                    pos = i + self.pointer_size();
+                }
+                trace!("{}", msg);
+            }
+        }
+    }
+
+    pub fn leak_report(&self) -> usize {
+        trace!("### LEAK REPORT ###");
+        let leaks: Vec<_> = self.alloc_map
+            .iter()
+            .filter_map(|(&key, val)| {
+                if val.kind != Kind::Static {
+                    Some(key)
+                } else {
+                    None
+                }
+            })
+            .collect();
+        let n = leaks.len();
+        self.dump_allocs(leaks);
+        n
+    }
+}
+
+/// Byte accessors
+impl<'a, 'tcx> Memory<'a, 'tcx> {
+    fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
+        if size == 0 {
+            return Ok(&[]);
+        }
+        // FIXME: check alignment for zst memory accesses?
+        if self.reads_are_aligned {
+            self.check_align(ptr.into(), align)?;
+        }
+        self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
+        let alloc = self.get(ptr.alloc_id)?;
+        assert_eq!(ptr.offset as usize as u64, ptr.offset);
+        assert_eq!(size as usize as u64, size);
+        let offset = ptr.offset as usize;
+        Ok(&alloc.bytes[offset..offset + size as usize])
+    }
+
+    fn get_bytes_unchecked_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
+        if size == 0 {
+            return Ok(&mut []);
+        }
+        // FIXME: check alignment for zst memory accesses?
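+        // Note: as for reads above, this check is skipped while `writes_are_aligned` is
+        // false; see the `HasMemory` helpers at the bottom of this file.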
+ if self.writes_are_aligned { + self.check_align(ptr.into(), align)?; + } + self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) + let alloc = self.get_mut(ptr.alloc_id)?; + assert_eq!(ptr.offset as usize as u64, ptr.offset); + assert_eq!(size as usize as u64, size); + let offset = ptr.offset as usize; + Ok(&mut alloc.bytes[offset..offset + size as usize]) + } + + fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> { + assert_ne!(size, 0); + if self.relocations(ptr, size)?.count() != 0 { + return Err(EvalError::ReadPointerAsBytes); + } + self.check_defined(ptr, size)?; + self.get_bytes_unchecked(ptr, size, align) + } + + fn get_bytes_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> { + assert_ne!(size, 0); + self.clear_relocations(ptr, size)?; + self.mark_definedness(ptr.into(), size, true)?; + self.get_bytes_unchecked_mut(ptr, size, align) + } +} + +/// Reading and writing +impl<'a, 'tcx> Memory<'a, 'tcx> { + /// mark an allocation as being the entry point to a static (see `static_alloc` field) + pub fn mark_static(&mut self, alloc_id: AllocId) { + trace!("mark_static: {:?}", alloc_id); + if !self.static_alloc.insert(alloc_id) { + bug!("tried to mark an allocation ({:?}) as static twice", alloc_id); + } + } + + /// mark an allocation pointed to by a static as static and initialized + pub fn mark_inner_allocation(&mut self, alloc: AllocId, mutability: Mutability) -> EvalResult<'tcx> { + // relocations into other statics are not "inner allocations" + if !self.static_alloc.contains(&alloc) { + self.mark_static_initalized(alloc, mutability)?; + } + Ok(()) + } + + /// mark an allocation as static and initialized, either mutable or not + pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutability: Mutability) -> EvalResult<'tcx> { + trace!("mark_static_initalized {:?}, mutability: {:?}", alloc_id, mutability); + // do not use `self.get_mut(alloc_id)` here, because we might have already marked a + // sub-element or have circular pointers (e.g. `Rc`-cycles) + let relocations = match self.alloc_map.get_mut(&alloc_id) { + Some(&mut Allocation { ref mut relocations, ref mut kind, ref mut mutable, .. }) => { + match *kind { + // const eval results can refer to "locals". + // E.g. 
`const Foo: &u32 = &1;` refers to the temp local that stores the `1` + Kind::Stack | + // The entire point of this function + Kind::UninitializedStatic | + // In the future const eval will allow heap allocations so we'll need to protect them + // from deallocation, too + Kind::Rust | + Kind::C => {}, + Kind::Static => { + trace!("mark_static_initalized: skipping already initialized static referred to by static currently being initialized"); + return Ok(()); + }, + // FIXME: This could be allowed, but not for env vars set during miri execution + Kind::Env => return Err(EvalError::Unimplemented("statics can't refer to env vars".to_owned())), + } + *kind = Kind::Static; + *mutable = mutability; + // take out the relocations vector to free the borrow on self, so we can call + // mark recursively + mem::replace(relocations, Default::default()) + }, + None if !self.functions.contains_key(&alloc_id) => return Err(EvalError::DanglingPointerDeref), + _ => return Ok(()), + }; + // recurse into inner allocations + for &alloc in relocations.values() { + self.mark_inner_allocation(alloc, mutability)?; + } + // put back the relocations + self.alloc_map.get_mut(&alloc_id).expect("checked above").relocations = relocations; + Ok(()) + } + + pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64, nonoverlapping: bool) -> EvalResult<'tcx> { + if size == 0 { + // TODO: Should we check for alignment here? (Also see write_bytes intrinsic) + return Ok(()); + } + let src = src.to_ptr()?; + let dest = dest.to_ptr()?; + self.check_relocation_edges(src, size)?; + + let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr(); + let dest_bytes = self.get_bytes_mut(dest, size, align)?.as_mut_ptr(); + + // SAFE: The above indexing would have panicked if there weren't at least `size` bytes + // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and + // `dest` could possibly overlap. 
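+        // For example (illustration): with `src.offset == 0`, `dest.offset == 4` and `size == 8`,
+        // the byte ranges 0..8 and 4..12 overlap, so the `nonoverlapping` check below reports an
+        // error instead of performing an overlapping `copy_nonoverlapping`.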
+ unsafe { + assert_eq!(size as usize as u64, size); + if src.alloc_id == dest.alloc_id { + if nonoverlapping { + if (src.offset <= dest.offset && src.offset + size > dest.offset) || + (dest.offset <= src.offset && dest.offset + size > src.offset) { + return Err(EvalError::Intrinsic(format!("copy_nonoverlapping called on overlapping ranges"))); + } + } + ptr::copy(src_bytes, dest_bytes, size as usize); + } else { + ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize); + } + } + + self.copy_undef_mask(src, dest, size)?; + self.copy_relocations(src, dest, size)?; + + Ok(()) + } + + pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> { + let alloc = self.get(ptr.alloc_id)?; + assert_eq!(ptr.offset as usize as u64, ptr.offset); + let offset = ptr.offset as usize; + match alloc.bytes[offset..].iter().position(|&c| c == 0) { + Some(size) => { + if self.relocations(ptr, (size + 1) as u64)?.count() != 0 { + return Err(EvalError::ReadPointerAsBytes); + } + self.check_defined(ptr, (size + 1) as u64)?; + Ok(&alloc.bytes[offset..offset + size]) + }, + None => Err(EvalError::UnterminatedCString(ptr)), + } + } + + pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> { + if size == 0 { + return Ok(&[]); + } + self.get_bytes(ptr.to_ptr()?, size, 1) + } + + pub fn write_bytes(&mut self, ptr: PrimVal, src: &[u8]) -> EvalResult<'tcx> { + if src.is_empty() { + return Ok(()); + } + let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?; + bytes.clone_from_slice(src); + Ok(()) + } + + pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> { + if count == 0 { + return Ok(()); + } + let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?; + for b in bytes { *b = val; } + Ok(()) + } + + pub fn read_ptr(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Pointer> { + let size = self.pointer_size(); + if self.check_defined(ptr, size).is_err() { + return Ok(PrimVal::Undef.into()); + } + self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer + let endianess = self.endianess(); + let bytes = self.get_bytes_unchecked(ptr, size, size)?; + let offset = read_target_uint(endianess, bytes).unwrap(); + assert_eq!(offset as u64 as u128, offset); + let offset = offset as u64; + let alloc = self.get(ptr.alloc_id)?; + match alloc.relocations.get(&ptr.offset) { + Some(&alloc_id) => Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, offset)).into()), + None => Ok(PrimVal::Bytes(offset as u128).into()), + } + } + + pub fn write_ptr(&mut self, dest: MemoryPointer, ptr: MemoryPointer) -> EvalResult<'tcx> { + self.write_usize(dest, ptr.offset as u64)?; + self.get_mut(dest.alloc_id)?.relocations.insert(dest.offset, ptr.alloc_id); + Ok(()) + } + + pub fn write_primval( + &mut self, + dest: Pointer, + val: PrimVal, + size: u64, + ) -> EvalResult<'tcx> { + match val { + PrimVal::Ptr(ptr) => { + assert_eq!(size, self.pointer_size()); + self.write_ptr(dest.to_ptr()?, ptr) + } + + PrimVal::Bytes(bytes) => { + // We need to mask here, or the byteorder crate can die when given a u64 larger + // than fits in an integer of the requested size. 
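+                // For example (illustration): for `size == 1` the mask is `0xff`, so an
+                // out-of-range value such as `0x1234` is truncated to `0x34` rather than
+                // panicking inside `byteorder`.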
+ let mask = match size { + 1 => !0u8 as u128, + 2 => !0u16 as u128, + 4 => !0u32 as u128, + 8 => !0u64 as u128, + 16 => !0, + n => bug!("unexpected PrimVal::Bytes size: {}", n), + }; + self.write_uint(dest.to_ptr()?, bytes & mask, size) + } + + PrimVal::Undef => self.mark_definedness(dest, size, false), + } + } + + pub fn read_bool(&self, ptr: MemoryPointer) -> EvalResult<'tcx, bool> { + let bytes = self.get_bytes(ptr, 1, self.layout.i1_align.abi())?; + match bytes[0] { + 0 => Ok(false), + 1 => Ok(true), + _ => Err(EvalError::InvalidBool), + } + } + + pub fn write_bool(&mut self, ptr: MemoryPointer, b: bool) -> EvalResult<'tcx> { + let align = self.layout.i1_align.abi(); + self.get_bytes_mut(ptr, 1, align) + .map(|bytes| bytes[0] = b as u8) + } + + fn int_align(&self, size: u64) -> EvalResult<'tcx, u64> { + match size { + 1 => Ok(self.layout.i8_align.abi()), + 2 => Ok(self.layout.i16_align.abi()), + 4 => Ok(self.layout.i32_align.abi()), + 8 => Ok(self.layout.i64_align.abi()), + 16 => Ok(self.layout.i128_align.abi()), + _ => bug!("bad integer size: {}", size), + } + } + + pub fn read_int(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx, i128> { + let align = self.int_align(size)?; + self.get_bytes(ptr, size, align).map(|b| read_target_int(self.endianess(), b).unwrap()) + } + + pub fn write_int(&mut self, ptr: MemoryPointer, n: i128, size: u64) -> EvalResult<'tcx> { + let align = self.int_align(size)?; + let endianess = self.endianess(); + let b = self.get_bytes_mut(ptr, size, align)?; + write_target_int(endianess, b, n).unwrap(); + Ok(()) + } + + pub fn read_uint(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx, u128> { + let align = self.int_align(size)?; + self.get_bytes(ptr, size, align).map(|b| read_target_uint(self.endianess(), b).unwrap()) + } + + pub fn write_uint(&mut self, ptr: MemoryPointer, n: u128, size: u64) -> EvalResult<'tcx> { + let align = self.int_align(size)?; + let endianess = self.endianess(); + let b = self.get_bytes_mut(ptr, size, align)?; + write_target_uint(endianess, b, n).unwrap(); + Ok(()) + } + + pub fn read_isize(&self, ptr: MemoryPointer) -> EvalResult<'tcx, i64> { + self.read_int(ptr, self.pointer_size()).map(|i| i as i64) + } + + pub fn write_isize(&mut self, ptr: MemoryPointer, n: i64) -> EvalResult<'tcx> { + let size = self.pointer_size(); + self.write_int(ptr, n as i128, size) + } + + pub fn read_usize(&self, ptr: MemoryPointer) -> EvalResult<'tcx, u64> { + self.read_uint(ptr, self.pointer_size()).map(|i| i as u64) + } + + pub fn write_usize(&mut self, ptr: MemoryPointer, n: u64) -> EvalResult<'tcx> { + let size = self.pointer_size(); + self.write_uint(ptr, n as u128, size) + } + + pub fn write_f32(&mut self, ptr: MemoryPointer, f: f32) -> EvalResult<'tcx> { + let endianess = self.endianess(); + let align = self.layout.f32_align.abi(); + let b = self.get_bytes_mut(ptr, 4, align)?; + write_target_f32(endianess, b, f).unwrap(); + Ok(()) + } + + pub fn write_f64(&mut self, ptr: MemoryPointer, f: f64) -> EvalResult<'tcx> { + let endianess = self.endianess(); + let align = self.layout.f64_align.abi(); + let b = self.get_bytes_mut(ptr, 8, align)?; + write_target_f64(endianess, b, f).unwrap(); + Ok(()) + } + + pub fn read_f32(&self, ptr: MemoryPointer) -> EvalResult<'tcx, f32> { + self.get_bytes(ptr, 4, self.layout.f32_align.abi()) + .map(|b| read_target_f32(self.endianess(), b).unwrap()) + } + + pub fn read_f64(&self, ptr: MemoryPointer) -> EvalResult<'tcx, f64> { + self.get_bytes(ptr, 8, self.layout.f64_align.abi()) + .map(|b| 
read_target_f64(self.endianess(), b).unwrap())
+    }
+}
+
+/// Relocations
+impl<'a, 'tcx> Memory<'a, 'tcx> {
+    fn relocations(&self, ptr: MemoryPointer, size: u64)
+        -> EvalResult<'tcx, btree_map::Range<u64, AllocId>>
+    {
+        let start = ptr.offset.saturating_sub(self.pointer_size() - 1);
+        let end = ptr.offset + size;
+        Ok(self.get(ptr.alloc_id)?.relocations.range(start..end))
+    }
+
+    fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
+        // Find all relocations overlapping the given range.
+        let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect();
+        if keys.is_empty() { return Ok(()); }
+
+        // Find the start and end of the given range and its outermost relocations.
+        let start = ptr.offset;
+        let end = start + size;
+        let first = *keys.first().unwrap();
+        let last = *keys.last().unwrap() + self.pointer_size();
+
+        let alloc = self.get_mut(ptr.alloc_id)?;
+
+        // Mark parts of the outermost relocations as undefined if they partially fall outside the
+        // given range.
+        if first < start { alloc.undef_mask.set_range(first, start, false); }
+        if last > end { alloc.undef_mask.set_range(end, last, false); }
+
+        // Forget all the relocations.
+        for k in keys { alloc.relocations.remove(&k); }
+
+        Ok(())
+    }
+
+    fn check_relocation_edges(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
+        let overlapping_start = self.relocations(ptr, 0)?.count();
+        let overlapping_end = self.relocations(ptr.offset(size, self.layout)?, 0)?.count();
+        if overlapping_start + overlapping_end != 0 {
+            return Err(EvalError::ReadPointerAsBytes);
+        }
+        Ok(())
+    }
+
+    fn copy_relocations(&mut self, src: MemoryPointer, dest: MemoryPointer, size: u64) -> EvalResult<'tcx> {
+        let relocations: Vec<_> = self.relocations(src, size)?
+            .map(|(&offset, &alloc_id)| {
+                // Update relocation offsets for the new positions in the destination allocation.
+                (offset + dest.offset - src.offset, alloc_id)
+            })
+            .collect();
+        self.get_mut(dest.alloc_id)?.relocations.extend(relocations);
+        Ok(())
+    }
+}
+
+/// Undefined bytes
+impl<'a, 'tcx> Memory<'a, 'tcx> {
+    // FIXME(solson): This is a very naive, slow version.
+    fn copy_undef_mask(&mut self, src: MemoryPointer, dest: MemoryPointer, size: u64) -> EvalResult<'tcx> {
+        // The bits have to be saved locally before writing to dest in case src and dest overlap.
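+        // E.g. (illustration): copying 8 bytes performs 8 `get` calls followed by 8 `set`
+        // calls, one pair per byte of the copied range.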
+        assert_eq!(size as usize as u64, size);
+        let mut v = Vec::with_capacity(size as usize);
+        for i in 0..size {
+            let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i);
+            v.push(defined);
+        }
+        for (i, defined) in v.into_iter().enumerate() {
+            self.get_mut(dest.alloc_id)?.undef_mask.set(dest.offset + i as u64, defined);
+        }
+        Ok(())
+    }
+
+    fn check_defined(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
+        let alloc = self.get(ptr.alloc_id)?;
+        if !alloc.undef_mask.is_range_defined(ptr.offset, ptr.offset + size) {
+            return Err(EvalError::ReadUndefBytes);
+        }
+        Ok(())
+    }
+
+    pub fn mark_definedness(
+        &mut self,
+        ptr: Pointer,
+        size: u64,
+        new_state: bool
+    ) -> EvalResult<'tcx> {
+        if size == 0 {
+            return Ok(())
+        }
+        let ptr = ptr.to_ptr()?;
+        let mut alloc = self.get_mut(ptr.alloc_id)?;
+        alloc.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
+        Ok(())
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Methods to access integers in the target endianess
+////////////////////////////////////////////////////////////////////////////////
+
+fn write_target_uint(endianess: layout::Endian, mut target: &mut [u8], data: u128) -> Result<(), io::Error> {
+    let len = target.len();
+    match endianess {
+        layout::Endian::Little => target.write_uint128::<LittleEndian>(data, len),
+        layout::Endian::Big => target.write_uint128::<BigEndian>(data, len),
+    }
+}
+fn write_target_int(endianess: layout::Endian, mut target: &mut [u8], data: i128) -> Result<(), io::Error> {
+    let len = target.len();
+    match endianess {
+        layout::Endian::Little => target.write_int128::<LittleEndian>(data, len),
+        layout::Endian::Big => target.write_int128::<BigEndian>(data, len),
+    }
+}
+
+fn read_target_uint(endianess: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> {
+    match endianess {
+        layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
+        layout::Endian::Big => source.read_uint128::<BigEndian>(source.len()),
+    }
+}
+fn read_target_int(endianess: layout::Endian, mut source: &[u8]) -> Result<i128, io::Error> {
+    match endianess {
+        layout::Endian::Little => source.read_int128::<LittleEndian>(source.len()),
+        layout::Endian::Big => source.read_int128::<BigEndian>(source.len()),
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Methods to access floats in the target endianess
+////////////////////////////////////////////////////////////////////////////////
+
+fn write_target_f32(endianess: layout::Endian, mut target: &mut [u8], data: f32) -> Result<(), io::Error> {
+    match endianess {
+        layout::Endian::Little => target.write_f32::<LittleEndian>(data),
+        layout::Endian::Big => target.write_f32::<BigEndian>(data),
+    }
+}
+fn write_target_f64(endianess: layout::Endian, mut target: &mut [u8], data: f64) -> Result<(), io::Error> {
+    match endianess {
+        layout::Endian::Little => target.write_f64::<LittleEndian>(data),
+        layout::Endian::Big => target.write_f64::<BigEndian>(data),
+    }
+}
+
+fn read_target_f32(endianess: layout::Endian, mut source: &[u8]) -> Result<f32, io::Error> {
+    match endianess {
+        layout::Endian::Little => source.read_f32::<LittleEndian>(),
+        layout::Endian::Big => source.read_f32::<BigEndian>(),
+    }
+}
+fn read_target_f64(endianess: layout::Endian, mut source: &[u8]) -> Result<f64, io::Error> {
+    match endianess {
+        layout::Endian::Little => source.read_f64::<LittleEndian>(),
+        layout::Endian::Big => source.read_f64::<BigEndian>(),
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Undefined byte tracking
+////////////////////////////////////////////////////////////////////////////////
+
+type Block = u64;
+const BLOCK_SIZE: u64 = 64;
+
+#[derive(Clone, Debug)]
+pub struct UndefMask {
+    blocks: Vec<Block>,
+    len: u64,
+}
+
+impl UndefMask {
+    fn new(size: u64) -> Self {
+        let mut m = UndefMask {
+            blocks: vec![],
+            len: 0,
+        };
+        m.grow(size, false);
+        m
+    }
+
+    /// Check whether the range `start..end` (end-exclusive) is entirely defined.
+    pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
+        if end > self.len { return false; }
+        for i in start..end {
+            if !self.get(i) { return false; }
+        }
+        true
+    }
+
+    fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
+        let len = self.len;
+        if end > len { self.grow(end - len, new_state); }
+        self.set_range_inbounds(start, end, new_state);
+    }
+
+    fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
+        for i in start..end { self.set(i, new_state); }
+    }
+
+    fn get(&self, i: u64) -> bool {
+        let (block, bit) = bit_index(i);
+        (self.blocks[block] & 1 << bit) != 0
+    }
+
+    fn set(&mut self, i: u64, new_state: bool) {
+        let (block, bit) = bit_index(i);
+        if new_state {
+            self.blocks[block] |= 1 << bit;
+        } else {
+            self.blocks[block] &= !(1 << bit);
+        }
+    }
+
+    fn grow(&mut self, amount: u64, new_state: bool) {
+        let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
+        if amount > unused_trailing_bits {
+            let additional_blocks = amount / BLOCK_SIZE + 1;
+            assert_eq!(additional_blocks as usize as u64, additional_blocks);
+            self.blocks.extend(iter::repeat(0).take(additional_blocks as usize));
+        }
+        let start = self.len;
+        self.len += amount;
+        self.set_range_inbounds(start, start + amount, new_state);
+    }
+}
+
+fn bit_index(bits: u64) -> (usize, usize) {
+    let a = bits / BLOCK_SIZE;
+    let b = bits % BLOCK_SIZE;
+    assert_eq!(a as usize as u64, a);
+    assert_eq!(b as usize as u64, b);
+    (a as usize, b as usize)
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Unaligned accesses
+////////////////////////////////////////////////////////////////////////////////
+
+pub(crate) trait HasMemory<'a, 'tcx> {
+    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx>;
+
+    // These are not supposed to be overridden.
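+    // They bracket a single access, e.g. (sketch):
+    // `ecx.read_maybe_aligned(aligned, |ecx| ecx.memory.read_ptr(ptr))`
+    // disables the read alignment check for the duration of the closure when `aligned` is false.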
+    fn read_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
+        where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
+    {
+        assert!(self.memory_mut().reads_are_aligned, "Unaligned reads must not be nested");
+        self.memory_mut().reads_are_aligned = aligned;
+        let t = f(self);
+        self.memory_mut().reads_are_aligned = true;
+        t
+    }
+
+    fn write_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
+        where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
+    {
+        assert!(self.memory_mut().writes_are_aligned, "Unaligned writes must not be nested");
+        self.memory_mut().writes_are_aligned = aligned;
+        let t = f(self);
+        self.memory_mut().writes_are_aligned = true;
+        t
+    }
+}
+
+impl<'a, 'tcx> HasMemory<'a, 'tcx> for Memory<'a, 'tcx> {
+    #[inline]
+    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> {
+        self
+    }
+}
+
+impl<'a, 'tcx> HasMemory<'a, 'tcx> for EvalContext<'a, 'tcx> {
+    #[inline]
+    fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> {
+        &mut self.memory
+    }
+}
diff --git a/src/librustc_mir/miri/mod.rs b/src/librustc_mir/miri/mod.rs
new file mode 100644
index 0000000000000..37d87675d1776
--- /dev/null
+++ b/src/librustc_mir/miri/mod.rs
@@ -0,0 +1,46 @@
+mod cast;
+mod const_eval;
+mod error;
+mod eval_context;
+mod lvalue;
+mod memory;
+mod operator;
+mod step;
+mod terminator;
+mod traits;
+mod value;
+
+pub use self::error::{
+    EvalError,
+    EvalResult,
+};
+
+pub use self::eval_context::{
+    EvalContext,
+    Frame,
+    ResourceLimits,
+    StackPopCleanup,
+    eval_main,
+};
+
+pub use self::lvalue::{
+    Lvalue,
+    LvalueExtra,
+};
+
+pub use self::memory::{
+    AllocId,
+    Memory,
+    MemoryPointer,
+};
+
+pub use self::value::{
+    PrimVal,
+    PrimValKind,
+    Value,
+    Pointer,
+};
+
+pub use self::const_eval::{
+    eval_body_as_integer,
+};
diff --git a/src/librustc_mir/miri/operator.rs b/src/librustc_mir/miri/operator.rs
new file mode 100644
index 0000000000000..73eae71a37b5c
--- /dev/null
+++ b/src/librustc_mir/miri/operator.rs
@@ -0,0 +1,382 @@
+use rustc::mir;
+use rustc::ty::{self, Ty};
+
+use super::error::{EvalError, EvalResult};
+use super::eval_context::EvalContext;
+use super::memory::MemoryPointer;
+use super::lvalue::Lvalue;
+use super::value::{
+    PrimVal,
+    PrimValKind,
+    Value,
+    bytes_to_f32,
+    bytes_to_f64,
+    f32_to_bytes,
+    f64_to_bytes,
+};
+
+impl<'a, 'tcx> EvalContext<'a, 'tcx> {
+    fn binop_with_overflow(
+        &mut self,
+        op: mir::BinOp,
+        left: &mir::Operand<'tcx>,
+        right: &mir::Operand<'tcx>,
+    ) -> EvalResult<'tcx, (PrimVal, bool)> {
+        let left_ty = self.operand_ty(left);
+        let right_ty = self.operand_ty(right);
+        let left_val = self.eval_operand_to_primval(left)?;
+        let right_val = self.eval_operand_to_primval(right)?;
+        self.binary_op(op, left_val, left_ty, right_val, right_ty)
+    }
+
+    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
+    /// and a boolean signifying the potential overflow to the destination.
+    pub(super) fn intrinsic_with_overflow(
+        &mut self,
+        op: mir::BinOp,
+        left: &mir::Operand<'tcx>,
+        right: &mir::Operand<'tcx>,
+        dest: Lvalue<'tcx>,
+        dest_ty: Ty<'tcx>,
+    ) -> EvalResult<'tcx> {
+        let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
+        let val = Value::ByValPair(val, PrimVal::from_bool(overflowed));
+        self.write_value(val, dest, dest_ty)
+    }
+
+    /// Applies the binary operation `op` to the arguments and writes the result to the
+    /// destination. Returns `true` if the operation overflowed.
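+    /// For example (sketch): a `u8` addition of `200` and `100` writes the wrapped
+    /// result `44` to `dest` and returns `Ok(true)`.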
+ pub(super) fn intrinsic_overflowing( + &mut self, + op: mir::BinOp, + left: &mir::Operand<'tcx>, + right: &mir::Operand<'tcx>, + dest: Lvalue<'tcx>, + dest_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, bool> { + let (val, overflowed) = self.binop_with_overflow(op, left, right)?; + self.write_primval(dest, val, dest_ty)?; + Ok(overflowed) + } +} + +macro_rules! overflow { + ($op:ident, $l:expr, $r:expr) => ({ + let (val, overflowed) = $l.$op($r); + let primval = PrimVal::Bytes(val as u128); + Ok((primval, overflowed)) + }) +} + +macro_rules! int_arithmetic { + ($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({ + let l = $l; + let r = $r; + use super::value::PrimValKind::*; + match $kind { + I8 => overflow!($int_op, l as i8, r as i8), + I16 => overflow!($int_op, l as i16, r as i16), + I32 => overflow!($int_op, l as i32, r as i32), + I64 => overflow!($int_op, l as i64, r as i64), + I128 => overflow!($int_op, l as i128, r as i128), + U8 => overflow!($int_op, l as u8, r as u8), + U16 => overflow!($int_op, l as u16, r as u16), + U32 => overflow!($int_op, l as u32, r as u32), + U64 => overflow!($int_op, l as u64, r as u64), + U128 => overflow!($int_op, l as u128, r as u128), + _ => bug!("int_arithmetic should only be called on int primvals"), + } + }) +} + +macro_rules! int_shift { + ($kind:expr, $int_op:ident, $l:expr, $r:expr) => ({ + let l = $l; + let r = $r; + let r_wrapped = r as u32; + match $kind { + I8 => overflow!($int_op, l as i8, r_wrapped), + I16 => overflow!($int_op, l as i16, r_wrapped), + I32 => overflow!($int_op, l as i32, r_wrapped), + I64 => overflow!($int_op, l as i64, r_wrapped), + I128 => overflow!($int_op, l as i128, r_wrapped), + U8 => overflow!($int_op, l as u8, r_wrapped), + U16 => overflow!($int_op, l as u16, r_wrapped), + U32 => overflow!($int_op, l as u32, r_wrapped), + U64 => overflow!($int_op, l as u64, r_wrapped), + U128 => overflow!($int_op, l as u128, r_wrapped), + _ => bug!("int_shift should only be called on int primvals"), + }.map(|(val, over)| (val, over || r != r_wrapped as u128)) + }) +} + +macro_rules! float_arithmetic { + ($from_bytes:ident, $to_bytes:ident, $float_op:tt, $l:expr, $r:expr) => ({ + let l = $from_bytes($l); + let r = $from_bytes($r); + let bytes = $to_bytes(l $float_op r); + PrimVal::Bytes(bytes) + }) +} + +macro_rules! f32_arithmetic { + ($float_op:tt, $l:expr, $r:expr) => ( + float_arithmetic!(bytes_to_f32, f32_to_bytes, $float_op, $l, $r) + ) +} + +macro_rules! f64_arithmetic { + ($float_op:tt, $l:expr, $r:expr) => ( + float_arithmetic!(bytes_to_f64, f64_to_bytes, $float_op, $l, $r) + ) +} + +impl<'a, 'tcx> EvalContext<'a, 'tcx> { + /// Returns the result of the specified operation and whether it overflowed. 
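+    /// For example (sketch): `Add` on `PrimVal::Bytes(255)` and `PrimVal::Bytes(1)` at type
+    /// `u8` yields `(PrimVal::Bytes(0), true)`.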
+ pub fn binary_op( + &self, + bin_op: mir::BinOp, + left: PrimVal, + left_ty: Ty<'tcx>, + right: PrimVal, + right_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, (PrimVal, bool)> { + use rustc::mir::BinOp::*; + use super::value::PrimValKind::*; + + let left_kind = self.ty_to_primval_kind(left_ty)?; + let right_kind = self.ty_to_primval_kind(right_ty)?; + //trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); + + // I: Handle operations that support pointers + let usize = PrimValKind::from_uint_size(self.memory.pointer_size()); + let isize = PrimValKind::from_int_size(self.memory.pointer_size()); + if !left_kind.is_float() && !right_kind.is_float() { + if (!left.is_bytes() && !right.is_bytes()) && self.const_env() { + return Err(EvalError::NeedsRfc("Pointer arithmetic or comparison".to_string())); + } + match bin_op { + Offset if left_kind == Ptr && right_kind == usize => { + let pointee_ty = left_ty.builtin_deref(true, ty::LvaluePreference::NoPreference).expect("Offset called on non-ptr type").ty; + let ptr = self.pointer_offset(left.into(), pointee_ty, right.to_bytes()? as i64)?; + return Ok((ptr.into_inner_primval(), false)); + }, + // These work on anything + Eq if left_kind == right_kind => { + let result = match (left, right) { + (PrimVal::Bytes(left), PrimVal::Bytes(right)) => left == right, + (PrimVal::Ptr(left), PrimVal::Ptr(right)) => left == right, + (PrimVal::Undef, _) | (_, PrimVal::Undef) => return Err(EvalError::ReadUndefBytes), + _ => false, + }; + return Ok((PrimVal::from_bool(result), false)); + } + Ne if left_kind == right_kind => { + let result = match (left, right) { + (PrimVal::Bytes(left), PrimVal::Bytes(right)) => left != right, + (PrimVal::Ptr(left), PrimVal::Ptr(right)) => left != right, + (PrimVal::Undef, _) | (_, PrimVal::Undef) => return Err(EvalError::ReadUndefBytes), + _ => true, + }; + return Ok((PrimVal::from_bool(result), false)); + } + // These need both pointers to be in the same allocation + Lt | Le | Gt | Ge | Sub + if left_kind == right_kind + && (left_kind == Ptr || left_kind == usize || left_kind == isize) + && left.is_ptr() && right.is_ptr() => { + let left = left.to_ptr()?; + let right = right.to_ptr()?; + if left.alloc_id == right.alloc_id { + let res = match bin_op { + Lt => left.offset < right.offset, + Le => left.offset <= right.offset, + Gt => left.offset > right.offset, + Ge => left.offset >= right.offset, + Sub => { + return int_arithmetic!(left_kind, overflowing_sub, left.offset, right.offset); + } + _ => bug!("We already established it has to be one of these operators."), + }; + return Ok((PrimVal::from_bool(res), false)); + } else { + // Both are pointers, but from different allocations. + return Err(EvalError::InvalidPointerMath); + } + } + // These work if one operand is a pointer, the other an integer + Add | BitAnd | Sub + if left_kind == right_kind && (left_kind == usize || left_kind == isize) + && left.is_ptr() && right.is_bytes() => { + // Cast to i128 is fine as we checked the kind to be ptr-sized + return self.ptr_int_arithmetic(bin_op, left.to_ptr()?, right.to_bytes()? as i128, left_kind == isize); + } + Add | BitAnd + if left_kind == right_kind && (left_kind == usize || left_kind == isize) + && left.is_bytes() && right.is_ptr() => { + // This is a commutative operation, just swap the operands + return self.ptr_int_arithmetic(bin_op, right.to_ptr()?, left.to_bytes()? 
as i128, left_kind == isize); + } + _ => {} + } + } + + // II: From now on, everything must be bytes, no pointers + let l = left.to_bytes()?; + let r = right.to_bytes()?; + + // These ops can have an RHS with a different numeric type. + if right_kind.is_int() && (bin_op == Shl || bin_op == Shr) { + return match bin_op { + Shl => int_shift!(left_kind, overflowing_shl, l, r), + Shr => int_shift!(left_kind, overflowing_shr, l, r), + _ => bug!("it has already been checked that this is a shift op"), + }; + } + + if left_kind != right_kind { + let msg = format!("unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); + return Err(EvalError::Unimplemented(msg)); + } + + let val = match (bin_op, left_kind) { + (Eq, F32) => PrimVal::from_bool(bytes_to_f32(l) == bytes_to_f32(r)), + (Ne, F32) => PrimVal::from_bool(bytes_to_f32(l) != bytes_to_f32(r)), + (Lt, F32) => PrimVal::from_bool(bytes_to_f32(l) < bytes_to_f32(r)), + (Le, F32) => PrimVal::from_bool(bytes_to_f32(l) <= bytes_to_f32(r)), + (Gt, F32) => PrimVal::from_bool(bytes_to_f32(l) > bytes_to_f32(r)), + (Ge, F32) => PrimVal::from_bool(bytes_to_f32(l) >= bytes_to_f32(r)), + + (Eq, F64) => PrimVal::from_bool(bytes_to_f64(l) == bytes_to_f64(r)), + (Ne, F64) => PrimVal::from_bool(bytes_to_f64(l) != bytes_to_f64(r)), + (Lt, F64) => PrimVal::from_bool(bytes_to_f64(l) < bytes_to_f64(r)), + (Le, F64) => PrimVal::from_bool(bytes_to_f64(l) <= bytes_to_f64(r)), + (Gt, F64) => PrimVal::from_bool(bytes_to_f64(l) > bytes_to_f64(r)), + (Ge, F64) => PrimVal::from_bool(bytes_to_f64(l) >= bytes_to_f64(r)), + + (Add, F32) => f32_arithmetic!(+, l, r), + (Sub, F32) => f32_arithmetic!(-, l, r), + (Mul, F32) => f32_arithmetic!(*, l, r), + (Div, F32) => f32_arithmetic!(/, l, r), + (Rem, F32) => f32_arithmetic!(%, l, r), + + (Add, F64) => f64_arithmetic!(+, l, r), + (Sub, F64) => f64_arithmetic!(-, l, r), + (Mul, F64) => f64_arithmetic!(*, l, r), + (Div, F64) => f64_arithmetic!(/, l, r), + (Rem, F64) => f64_arithmetic!(%, l, r), + + (Lt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) < (r as i128)), + (Lt, _) => PrimVal::from_bool(l < r), + (Le, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) <= (r as i128)), + (Le, _) => PrimVal::from_bool(l <= r), + (Gt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) > (r as i128)), + (Gt, _) => PrimVal::from_bool(l > r), + (Ge, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) >= (r as i128)), + (Ge, _) => PrimVal::from_bool(l >= r), + + (BitOr, _) => PrimVal::Bytes(l | r), + (BitAnd, _) => PrimVal::Bytes(l & r), + (BitXor, _) => PrimVal::Bytes(l ^ r), + + (Add, k) if k.is_int() => return int_arithmetic!(k, overflowing_add, l, r), + (Sub, k) if k.is_int() => return int_arithmetic!(k, overflowing_sub, l, r), + (Mul, k) if k.is_int() => return int_arithmetic!(k, overflowing_mul, l, r), + (Div, k) if k.is_int() => return int_arithmetic!(k, overflowing_div, l, r), + (Rem, k) if k.is_int() => return int_arithmetic!(k, overflowing_rem, l, r), + + _ => { + let msg = format!("unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); + return Err(EvalError::Unimplemented(msg)); + } + }; + + Ok((val, false)) + } + + fn ptr_int_arithmetic( + &self, + bin_op: mir::BinOp, + left: MemoryPointer, + right: i128, + signed: bool, + ) -> EvalResult<'tcx, (PrimVal, bool)> { + use rustc::mir::BinOp::*; + + fn map_to_primval((res, over) : (MemoryPointer, bool)) -> (PrimVal, bool) { + (PrimVal::Ptr(res), over) + } + + Ok(match 
bin_op {
+            Sub =>
+                // The only way this can overflow is by underflowing, so the signedness of the right operand does not matter
+                map_to_primval(left.overflowing_signed_offset(-right, self.memory.layout)),
+            Add if signed =>
+                map_to_primval(left.overflowing_signed_offset(right, self.memory.layout)),
+            Add if !signed =>
+                map_to_primval(left.overflowing_offset(right as u64, self.memory.layout)),
+
+            BitAnd if !signed => {
+                let base_mask: u64 = !(self.memory.get(left.alloc_id)?.align - 1);
+                let right = right as u64;
+                if right & base_mask == base_mask {
+                    // Case 1: The base address bits are all preserved, i.e., right is all-1 there
+                    (PrimVal::Ptr(MemoryPointer::new(left.alloc_id, left.offset & right)), false)
+                } else if right & base_mask == 0 {
+                    // Case 2: The base address bits are all taken away, i.e., right is all-0 there
+                    (PrimVal::from_u128((left.offset & right) as u128), false)
+                } else {
+                    return Err(EvalError::ReadPointerAsBytes);
+                }
+            }
+
+            _ => {
+                let msg = format!("unimplemented binary op on pointer {:?}: {:?}, {:?} ({})", bin_op, left, right, if signed { "signed" } else { "unsigned" });
+                return Err(EvalError::Unimplemented(msg));
+            }
+        })
+    }
+}
+
+pub fn unary_op<'tcx>(
+    un_op: mir::UnOp,
+    val: PrimVal,
+    val_kind: PrimValKind,
+) -> EvalResult<'tcx, PrimVal> {
+    use rustc::mir::UnOp::*;
+    use super::value::PrimValKind::*;
+
+    let bytes = val.to_bytes()?;
+
+    let result_bytes = match (un_op, val_kind) {
+        (Not, Bool) => !val.to_bool()? as u128,
+
+        (Not, U8) => !(bytes as u8) as u128,
+        (Not, U16) => !(bytes as u16) as u128,
+        (Not, U32) => !(bytes as u32) as u128,
+        (Not, U64) => !(bytes as u64) as u128,
+        (Not, U128) => !bytes,
+
+        (Not, I8) => !(bytes as i8) as u128,
+        (Not, I16) => !(bytes as i16) as u128,
+        (Not, I32) => !(bytes as i32) as u128,
+        (Not, I64) => !(bytes as i64) as u128,
+        (Not, I128) => !(bytes as i128) as u128,
+
+        (Neg, I8) => -(bytes as i8) as u128,
+        (Neg, I16) => -(bytes as i16) as u128,
+        (Neg, I32) => -(bytes as i32) as u128,
+        (Neg, I64) => -(bytes as i64) as u128,
+        (Neg, I128) => -(bytes as i128) as u128,
+
+        (Neg, F32) => f32_to_bytes(-bytes_to_f32(bytes)),
+        (Neg, F64) => f64_to_bytes(-bytes_to_f64(bytes)),
+
+        _ => {
+            let msg = format!("unimplemented unary op: {:?}, {:?}", un_op, val);
+            return Err(EvalError::Unimplemented(msg));
+        }
+    };
+
+    Ok(PrimVal::Bytes(result_bytes))
+}
diff --git a/src/librustc_mir/miri/step.rs b/src/librustc_mir/miri/step.rs
new file mode 100644
index 0000000000000..4f725ff6a62fe
--- /dev/null
+++ b/src/librustc_mir/miri/step.rs
@@ -0,0 +1,281 @@
+//! This module contains the `EvalContext` methods for executing a single step of the interpreter.
+//!
+//! The main entry point is the `step` method.
+
+use rustc::hir::def_id::DefId;
+use rustc::hir;
+use rustc::mir::visit::{Visitor, LvalueContext};
+use rustc::mir;
+use rustc::traits::Reveal;
+use rustc::ty::layout::Layout;
+use rustc::ty::{subst, self};
+
+use super::error::{EvalResult, EvalError};
+use super::eval_context::{EvalContext, StackPopCleanup};
+use super::lvalue::{Global, GlobalId, Lvalue};
+use super::value::{Value, PrimVal};
+
+use syntax::codemap::Span;
+use syntax::ast::Mutability;
+
+impl<'a, 'tcx> EvalContext<'a, 'tcx> {
+    pub fn inc_step_counter_and_check_limit(&mut self, n: u64) -> EvalResult<'tcx> {
+        self.steps_remaining = self.steps_remaining.saturating_sub(n);
+        if self.steps_remaining > 0 {
+            Ok(())
+        } else {
+            Err(EvalError::ExecutionTimeLimitReached)
+        }
+    }
+
+    /// Returns true as long as there are more things to do.
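+    /// A typical driver loop is therefore just (sketch): `while ecx.step()? {}`.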
+ pub fn step(&mut self) -> EvalResult<'tcx, bool> { + self.inc_step_counter_and_check_limit(1)?; + if self.stack.is_empty() { + return Ok(false); + } + + let block = self.frame().block; + let stmt_id = self.frame().stmt; + let mir = self.mir(); + let basic_block = &mir.basic_blocks()[block]; + + if let Some(stmt) = basic_block.statements.get(stmt_id) { + let mut new = Ok(0); + ConstantExtractor { + span: stmt.source_info.span, + instance: self.frame().instance, + ecx: self, + mir, + new_constants: &mut new, + }.visit_statement(block, stmt, mir::Location { block, statement_index: stmt_id }); + // if ConstantExtractor added new frames, we don't execute anything here + // but await the next call to step + if new? == 0 { + self.statement(stmt)?; + } + return Ok(true); + } + + let terminator = basic_block.terminator(); + let mut new = Ok(0); + ConstantExtractor { + span: terminator.source_info.span, + instance: self.frame().instance, + ecx: self, + mir, + new_constants: &mut new, + }.visit_terminator(block, terminator, mir::Location { block, statement_index: stmt_id }); + // if ConstantExtractor added new frames, we don't execute anything here + // but await the next call to step + if new? == 0 { + self.terminator(terminator)?; + } + Ok(true) + } + + fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> { + trace!("{:?}", stmt); + + use rustc::mir::StatementKind::*; + match stmt.kind { + Assign(ref lvalue, ref rvalue) => self.eval_rvalue_into_lvalue(rvalue, lvalue)?, + + SetDiscriminant { ref lvalue, variant_index } => { + let dest = self.eval_lvalue(lvalue)?; + let dest_ty = self.lvalue_ty(lvalue); + let dest_layout = self.type_layout(dest_ty)?; + + match *dest_layout { + Layout::General { discr, .. } => { + // FIXME: I (oli-obk) think we need to check the + // `dest_ty` for the variant's discriminant and write + // instead of the variant index + // We don't have any tests actually going through these lines + let discr_ty = discr.to_ty(&self.tcx, false); + let discr_lval = self.lvalue_field(dest, 0, dest_ty, discr_ty)?; + + self.write_value(Value::ByVal(PrimVal::Bytes(variant_index as u128)), discr_lval, discr_ty)?; + } + + Layout::RawNullablePointer { nndiscr, .. } => { + if variant_index as u64 != nndiscr { + self.write_null(dest, dest_ty)?; + } + } + + _ => bug!("SetDiscriminant on {} represented as {:#?}", dest_ty, dest_layout), + } + } + + // Mark locals as dead or alive. + StorageLive(ref lvalue) | StorageDead(ref lvalue)=> { + let (frame, local) = match self.eval_lvalue(lvalue)? { + Lvalue::Local{ frame, local } if self.stack.len() == frame+1 => (frame, local), + _ => return Err(EvalError::Unimplemented("Storage annotations must refer to locals of the topmost stack frame.".to_owned())) // FIXME maybe this should get its own error type + }; + let old_val = match stmt.kind { + StorageLive(_) => self.stack[frame].storage_live(local)?, + StorageDead(_) => self.stack[frame].storage_dead(local)?, + _ => bug!("We already checked that we are a storage stmt") + }; + self.deallocate_local(old_val)?; + } + + // Just a borrowck thing + EndRegion(..) => {} + + // Defined to do nothing. These are added by optimization passes, to avoid changing the + // size of MIR constantly. + Nop => {} + + InlineAsm { .. 
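`step` is designed to be driven from a loop: it executes one statement or terminator and reports whether anything is left to do, while the step counter guards against runaway evaluation. A minimal stand-in driver, assuming a hypothetical `Interp` type rather than miri's `EvalContext`:

```rust
struct Interp { steps_remaining: u64, stack: Vec<u32> }

#[derive(Debug)]
enum EvalError { ExecutionTimeLimitReached }

impl Interp {
    // Mirrors inc_step_counter_and_check_limit + step above (simplified:
    // the real step also returns Ok(false) up front on an empty stack).
    fn step(&mut self) -> Result<bool, EvalError> {
        self.steps_remaining = self.steps_remaining.saturating_sub(1);
        if self.steps_remaining == 0 {
            return Err(EvalError::ExecutionTimeLimitReached);
        }
        // ... execute one statement or terminator of the top frame here ...
        self.stack.pop();
        Ok(!self.stack.is_empty())
    }
}

fn run(interp: &mut Interp) -> Result<(), EvalError> {
    // keep stepping until the interpreter reports there is nothing left
    while interp.step()? {}
    Ok(())
}

fn main() {
    let mut interp = Interp { steps_remaining: 100, stack: vec![1, 2, 3] };
    run(&mut interp).unwrap();
    assert!(interp.stack.is_empty());
}
```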
} => return Err(EvalError::InlineAsm), + } + + self.frame_mut().stmt += 1; + Ok(()) + } + + fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> { + trace!("{:?}", terminator.kind); + self.eval_terminator(terminator)?; + if !self.stack.is_empty() { + trace!("// {:?}", self.frame().block); + } + Ok(()) + } +} + +// WARNING: make sure that any methods implemented on this type don't ever access ecx.stack +// this includes any method that might access the stack +// basically don't call anything other than `load_mir`, `alloc_ptr`, `push_stack_frame` +// The reason is that `push_stack_frame` modifies the stack, for obvious reasons +struct ConstantExtractor<'a, 'b: 'a, 'tcx: 'b> { + span: Span, + ecx: &'a mut EvalContext<'b, 'tcx>, + mir: &'tcx mir::Mir<'tcx>, + instance: ty::Instance<'tcx>, + new_constants: &'a mut EvalResult<'tcx, u64>, +} + +impl<'a, 'b, 'tcx> ConstantExtractor<'a, 'b, 'tcx> { + fn global_item( + &mut self, + def_id: DefId, + substs: &'tcx subst::Substs<'tcx>, + span: Span, + mutability: Mutability, + ) { + let instance = self.ecx.resolve_associated_const(def_id, substs); + let cid = GlobalId { instance, promoted: None }; + if self.ecx.globals.contains_key(&cid) { + return; + } + if self.ecx.tcx.has_attr(def_id, "linkage") { + trace!("Initializing an extern global with NULL"); + self.ecx.globals.insert(cid, Global::initialized(self.ecx.tcx.type_of(def_id), Value::ByVal(PrimVal::Bytes(0)), mutability)); + return; + } + self.try(|this| { + let mir = this.ecx.load_mir(instance.def)?; + this.ecx.globals.insert(cid, Global::uninitialized(mir.return_ty)); + let internally_mutable = !mir.return_ty.is_freeze( + this.ecx.tcx, + ty::ParamEnv::empty(Reveal::All), + span); + let mutability = if mutability == Mutability::Mutable || internally_mutable { + Mutability::Mutable + } else { + Mutability::Immutable + }; + let cleanup = StackPopCleanup::MarkStatic(mutability); + let name = ty::tls::with(|tcx| tcx.item_path_str(def_id)); + trace!("pushing stack frame for global: {}", name); + this.ecx.push_stack_frame( + instance, + span, + mir, + Lvalue::Global(cid), + cleanup, + ) + }); + } + + fn try<F: FnOnce(&mut Self) -> EvalResult<'tcx>>(&mut self, f: F) { + if let Ok(ref mut n) = *self.new_constants { + *n += 1; + } else { + return; + } + if let Err(e) = f(self) { + *self.new_constants = Err(e); + } + } +} + +impl<'a, 'b, 'tcx> Visitor<'tcx> for ConstantExtractor<'a, 'b, 'tcx> { + fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: mir::Location) { + self.super_constant(constant, location); + match constant.literal { + // already computed by rustc + mir::Literal::Value { ..
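`ConstantExtractor::try` counts a queued frame before running the closure, and `global_item` bails out early on a cache hit, so each global is pushed at most once no matter how often it is visited. A sketch of that memoization shape with illustrative stand-in types (this `GlobalId` is not the real struct):

```rust
use std::collections::HashMap;

#[derive(PartialEq, Eq, Hash, Clone, Copy)]
struct GlobalId { def_id: u32, promoted: Option<u32> }

struct Globals { cache: HashMap<GlobalId, Option<u64>> }

impl Globals {
    /// Returns true if a computation frame still has to be pushed.
    fn ensure_queued(&mut self, id: GlobalId) -> bool {
        if self.cache.contains_key(&id) {
            return false; // already computed or already queued
        }
        self.cache.insert(id, None); // like Global::uninitialized above
        true
    }
}

fn main() {
    let mut globals = Globals { cache: HashMap::new() };
    let id = GlobalId { def_id: 7, promoted: None };
    assert!(globals.ensure_queued(id));  // first visit: push a frame
    assert!(!globals.ensure_queued(id)); // later visits: cache hit, no-op
}
```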
} => {} + mir::Literal::Item { def_id, substs } => { + self.global_item(def_id, substs, constant.span, Mutability::Immutable); + }, + mir::Literal::Promoted { index } => { + let cid = GlobalId { + instance: self.instance, + promoted: Some(index), + }; + if self.ecx.globals.contains_key(&cid) { + return; + } + let mir = &self.mir.promoted[index]; + self.try(|this| { + let ty = this.ecx.monomorphize(mir.return_ty, this.instance.substs); + this.ecx.globals.insert(cid, Global::uninitialized(ty)); + trace!("pushing stack frame for {:?}", index); + this.ecx.push_stack_frame(this.instance, + constant.span, + mir, + Lvalue::Global(cid), + StackPopCleanup::MarkStatic(Mutability::Immutable), + ) + }); + } + } + } + + fn visit_lvalue( + &mut self, + lvalue: &mir::Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: mir::Location + ) { + self.super_lvalue(lvalue, context, location); + if let mir::Lvalue::Static(ref static_) = *lvalue { + let def_id = static_.def_id; + let substs = self.ecx.tcx.intern_substs(&[]); + let span = self.span; + if let Some(node_item) = self.ecx.tcx.hir.get_if_local(def_id) { + if let hir::map::Node::NodeItem(&hir::Item { ref node, .. }) = node_item { + if let hir::ItemStatic(_, m, _) = *node { + self.global_item(def_id, substs, span, if m == hir::MutMutable { Mutability::Mutable } else { Mutability::Immutable }); + return; + } else { + bug!("static def id doesn't point to static"); + } + } else { + bug!("static def id doesn't point to item"); + } + } else { + let def = self.ecx.tcx.describe_def(def_id).expect("static not found"); + if let hir::def::Def::Static(_, mutable) = def { + self.global_item(def_id, substs, span, if mutable { Mutability::Mutable } else { Mutability::Immutable }); + } else { + bug!("static found but isn't a static: {:?}", def); + } + } + } + } +} diff --git a/src/librustc_mir/miri/terminator/drop.rs b/src/librustc_mir/miri/terminator/drop.rs new file mode 100644 index 0000000000000..faf891c327c71 --- /dev/null +++ b/src/librustc_mir/miri/terminator/drop.rs @@ -0,0 +1,65 @@ +use rustc::mir; +use rustc::ty::{self, Ty}; +use syntax::codemap::Span; + +use miri::error::EvalResult; +use miri::eval_context::{EvalContext, StackPopCleanup}; +use miri::lvalue::{Lvalue, LvalueExtra}; +use miri::value::{Value, PrimVal}; + +impl<'a, 'tcx> EvalContext<'a, 'tcx> { + pub(crate) fn drop_lvalue(&mut self, lval: Lvalue<'tcx>, instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> { + trace!("drop_lvalue: {:#?}", lval); + // We take the address of the object. This may well be unaligned, which is fine for us here. + // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared + // by rustc. + let val = match self.force_allocation(lval)? { + Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: _ } => ptr.to_value_with_vtable(vtable), + Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: _ } => ptr.to_value_with_len(len), + Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: _ } => ptr.to_value(), + _ => bug!("force_allocation broken"), + }; + self.drop(val, instance, ty, span) + } + pub(crate) fn drop(&mut self, arg: Value, mut instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> { + trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def); + + if let ty::InstanceDef::DropGlue(_, None) = instance.def { + trace!("nothing to do, aborting"); + // we don't actually need to drop anything + return Ok(()); + } + let mir = match ty.sty { + ty::TyDynamic(..) 
=> { + let vtable = match arg { + Value::ByValPair(_, PrimVal::Ptr(vtable)) => vtable, + _ => bug!("expected fat ptr, got {:?}", arg), + }; + match self.read_drop_type_from_vtable(vtable)? { + Some(func) => { + instance = func; + self.load_mir(func.def)? + }, + // no drop fn -> bail out + None => return Ok(()), + } + }, + _ => self.load_mir(instance.def)?, + }; + + self.push_stack_frame( + instance, + span, + mir, + Lvalue::undef(), + StackPopCleanup::None, + )?; + + let mut arg_locals = self.frame().mir.args_iter(); + assert_eq!(self.frame().mir.arg_count, 1); + let arg_local = arg_locals.next().unwrap(); + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + let arg_ty = self.tcx.mk_mut_ptr(ty); + self.write_value(arg, dest, arg_ty) + } +} diff --git a/src/librustc_mir/miri/terminator/intrinsic.rs b/src/librustc_mir/miri/terminator/intrinsic.rs new file mode 100644 index 0000000000000..bb04ba3fd63d0 --- /dev/null +++ b/src/librustc_mir/miri/terminator/intrinsic.rs @@ -0,0 +1,612 @@ +use rustc::mir; +use rustc::traits::Reveal; +use rustc::ty::layout::{Layout, Size, Align}; +use rustc::ty::subst::Substs; +use rustc::ty::{self, Ty}; + +use miri::error::{EvalError, EvalResult}; +use miri::eval_context::EvalContext; +use miri::lvalue::{Lvalue, LvalueExtra}; +use miri::value::{PrimVal, PrimValKind, Value, Pointer}; +use miri::memory::HasMemory; + +impl<'a, 'tcx> EvalContext<'a, 'tcx> { + pub(super) fn call_intrinsic( + &mut self, + instance: ty::Instance<'tcx>, + args: &[mir::Operand<'tcx>], + dest: Lvalue<'tcx>, + dest_ty: Ty<'tcx>, + dest_layout: &'tcx Layout, + target: mir::BasicBlock, + ) -> EvalResult<'tcx> { + let arg_vals: EvalResult<'tcx, Vec<Value>> = args.iter() + .map(|arg| self.eval_operand(arg)) + .collect(); + let arg_vals = arg_vals?; + let i32 = self.tcx.types.i32; + let isize = self.tcx.types.isize; + let usize = self.tcx.types.usize; + let f32 = self.tcx.types.f32; + let f64 = self.tcx.types.f64; + let substs = instance.substs; + + let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..]; + match intrinsic_name { + "add_with_overflow" => + self.intrinsic_with_overflow(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?, + + "sub_with_overflow" => + self.intrinsic_with_overflow(mir::BinOp::Sub, &args[0], &args[1], dest, dest_ty)?, + + "mul_with_overflow" => + self.intrinsic_with_overflow(mir::BinOp::Mul, &args[0], &args[1], dest, dest_ty)?, + + + "arith_offset" => { + let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()?
as i64; + let ptr = arg_vals[0].into_ptr(&mut self.memory)?; + let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?; + self.write_ptr(dest, result_ptr, dest_ty)?; + } + + "assume" => { + let bool = self.tcx.types.bool; + let cond = self.value_to_primval(arg_vals[0], bool)?.to_bool()?; + if !cond { return Err(EvalError::AssumptionNotHeld); } + } + + "atomic_load" | + "atomic_load_relaxed" | + "atomic_load_acq" | + "volatile_load" => { + let ty = substs.type_at(0); + let ptr = arg_vals[0].into_ptr(&mut self.memory)?; + self.write_value(Value::by_ref(ptr), dest, ty)?; + } + + "atomic_store" | + "atomic_store_relaxed" | + "atomic_store_rel" | + "volatile_store" => { + let ty = substs.type_at(0); + let dest = arg_vals[0].into_ptr(&mut self.memory)?; + self.write_value_to_ptr(arg_vals[1], dest, ty)?; + } + + "atomic_fence_acq" => { + // we are inherently single-threaded and single-core, so this is a no-op + } + + _ if intrinsic_name.starts_with("atomic_xchg") => { + let ty = substs.type_at(0); + let ptr = arg_vals[0].into_ptr(&mut self.memory)?; + let change = self.value_to_primval(arg_vals[1], ty)?; + let old = self.read_value(ptr, ty)?; + let old = match old { + Value::ByVal(val) => val, + Value::ByRef(..) => bug!("just read the value, can't be byref"), + Value::ByValPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"), + }; + self.write_primval(dest, old, ty)?; + self.write_primval(Lvalue::from_primval_ptr(ptr), change, ty)?; + } + + _ if intrinsic_name.starts_with("atomic_cxchg") => { + let ty = substs.type_at(0); + let ptr = arg_vals[0].into_ptr(&mut self.memory)?; + let expect_old = self.value_to_primval(arg_vals[1], ty)?; + let change = self.value_to_primval(arg_vals[2], ty)?; + let old = self.read_value(ptr, ty)?; + let old = match old { + Value::ByVal(val) => val, + Value::ByRef(..) => bug!("just read the value, can't be byref"), + Value::ByValPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"), + }; + let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?; + let dest = self.force_allocation(dest)?.to_ptr()?; + self.write_pair_to_ptr(old, val, dest, dest_ty)?; + self.write_primval(Lvalue::from_primval_ptr(ptr), change, ty)?; + } + + "atomic_or" | "atomic_or_acq" | "atomic_or_rel" | "atomic_or_acqrel" | "atomic_or_relaxed" | + "atomic_xor" | "atomic_xor_acq" | "atomic_xor_rel" | "atomic_xor_acqrel" | "atomic_xor_relaxed" | + "atomic_and" | "atomic_and_acq" | "atomic_and_rel" | "atomic_and_acqrel" | "atomic_and_relaxed" | + "atomic_xadd" | "atomic_xadd_acq" | "atomic_xadd_rel" | "atomic_xadd_acqrel" | "atomic_xadd_relaxed" | + "atomic_xsub" | "atomic_xsub_acq" | "atomic_xsub_rel" | "atomic_xsub_acqrel" | "atomic_xsub_relaxed" => { + let ty = substs.type_at(0); + let ptr = arg_vals[0].into_ptr(&mut self.memory)?; + let change = self.value_to_primval(arg_vals[1], ty)?; + let old = self.read_value(ptr, ty)?; + let old = match old { + Value::ByVal(val) => val, + Value::ByRef(..) => bug!("just read the value, can't be byref"), + Value::ByValPair(..) => bug!("atomic_xadd_relaxed doesn't work with nonprimitives"), + }; + self.write_primval(dest, old, ty)?; + let op = match intrinsic_name.split('_').nth(1).unwrap() { + "or" => mir::BinOp::BitOr, + "xor" => mir::BinOp::BitXor, + "and" => mir::BinOp::BitAnd, + "xadd" => mir::BinOp::Add, + "xsub" => mir::BinOp::Sub, + _ => bug!(), + }; + // FIXME: what do atomics do on overflow?
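Because execution here is single-threaded, every `atomic_*` read-modify-write decomposes into a plain read, a binop, and a write-back; nothing can observe the intermediate state. As for the FIXME just above: hardware-backed `fetch_add` wraps on overflow, so wrapping arithmetic is the faithful emulation. A sketch over plain integers (the real code goes through `Value`/`PrimVal` and typed memory):

```rust
// Emulating a read-modify-write atomic in a single-threaded interpreter.
fn emulate_atomic_xadd(cell: &mut u64, change: u64) -> u64 {
    let old = *cell;                  // read_value
    *cell = old.wrapping_add(change); // binary_op + write back through the ptr
    old                               // written to `dest` before the update
}

fn main() {
    let mut x = 40;
    assert_eq!(emulate_atomic_xadd(&mut x, 2), 40);
    assert_eq!(x, 42);
}
```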
+ let (val, _) = self.binary_op(op, old, ty, change, ty)?; + self.write_primval(Lvalue::from_primval_ptr(ptr), val, ty)?; + }, + + "breakpoint" => unimplemented!(), // halt miri + + "copy" | + "copy_nonoverlapping" => { + let elem_ty = substs.type_at(0); + let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value"); + if elem_size != 0 { + let elem_align = self.type_align(elem_ty)?; + let src = arg_vals[0].into_ptr(&mut self.memory)?; + let dest = arg_vals[1].into_ptr(&mut self.memory)?; + let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?; + self.memory.copy(src, dest, count * elem_size, elem_align, intrinsic_name.ends_with("_nonoverlapping"))?; + } + } + + "ctpop" | + "cttz" | + "cttz_nonzero" | + "ctlz" | + "ctlz_nonzero" | + "bswap" => { + let ty = substs.type_at(0); + let num = self.value_to_primval(arg_vals[0], ty)?.to_bytes()?; + let kind = self.ty_to_primval_kind(ty)?; + let num = if intrinsic_name.ends_with("_nonzero") { + if num == 0 { + return Err(EvalError::Intrinsic(format!("{} called on 0", intrinsic_name))) + } + numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)? + } else { + numeric_intrinsic(intrinsic_name, num, kind)? + }; + self.write_primval(dest, num, ty)?; + } + + "discriminant_value" => { + let ty = substs.type_at(0); + let adt_ptr = arg_vals[0].into_ptr(&mut self.memory)?.to_ptr()?; + let discr_val = self.read_discriminant_value(adt_ptr, ty)?; + self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?; + } + + "sinf32" | "fabsf32" | "cosf32" | + "sqrtf32" | "expf32" | "exp2f32" | + "logf32" | "log10f32" | "log2f32" | + "floorf32" | "ceilf32" | "truncf32" => { + let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; + let f = match intrinsic_name { + "sinf32" => f.sin(), + "fabsf32" => f.abs(), + "cosf32" => f.cos(), + "sqrtf32" => f.sqrt(), + "expf32" => f.exp(), + "exp2f32" => f.exp2(), + "logf32" => f.ln(), + "log10f32" => f.log10(), + "log2f32" => f.log2(), + "floorf32" => f.floor(), + "ceilf32" => f.ceil(), + "truncf32" => f.trunc(), + _ => bug!(), + }; + self.write_primval(dest, PrimVal::from_f32(f), dest_ty)?; + } + + "sinf64" | "fabsf64" | "cosf64" | + "sqrtf64" | "expf64" | "exp2f64" | + "logf64" | "log10f64" | "log2f64" | + "floorf64" | "ceilf64" | "truncf64" => { + let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; + let f = match intrinsic_name { + "sinf64" => f.sin(), + "fabsf64" => f.abs(), + "cosf64" => f.cos(), + "sqrtf64" => f.sqrt(), + "expf64" => f.exp(), + "exp2f64" => f.exp2(), + "logf64" => f.ln(), + "log10f64" => f.log10(), + "log2f64" => f.log2(), + "floorf64" => f.floor(), + "ceilf64" => f.ceil(), + "truncf64" => f.trunc(), + _ => bug!(), + }; + self.write_primval(dest, PrimVal::from_f64(f), dest_ty)?; + } + + "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { + let ty = substs.type_at(0); + let a = self.value_to_primval(arg_vals[0], ty)?; + let b = self.value_to_primval(arg_vals[1], ty)?; + let op = match intrinsic_name { + "fadd_fast" => mir::BinOp::Add, + "fsub_fast" => mir::BinOp::Sub, + "fmul_fast" => mir::BinOp::Mul, + "fdiv_fast" => mir::BinOp::Div, + "frem_fast" => mir::BinOp::Rem, + _ => bug!(), + }; + let result = self.binary_op(op, a, ty, b, ty)?; + self.write_primval(dest, result.0, dest_ty)?; + } + + "likely" | + "unlikely" | + "forget" => {} + + "init" => { + let size = self.type_size(dest_ty)?.expect("cannot zero unsized value"); + let init = |this: &mut Self, val: Value| { + let zero_val = match val { + Value::ByRef(ptr, aligned) 
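The `ctpop`/`cttz`/`ctlz`/`bswap` arm above defers to `numeric_intrinsic` (defined at the end of this file) precisely so the value is truncated to its own type before counting bits; otherwise the `u128` container's width would leak into the result. A standalone illustration:

```rust
// Counting leading zeros must happen at the value's own width; the u128
// container would otherwise report 120 extra zero bits for a u8.
fn ctlz_u8(bytes: u128) -> u32 {
    (bytes as u8).leading_zeros()
}

fn main() {
    assert_eq!(ctlz_u8(1), 7);              // correct: u8 is 8 bits wide
    assert_eq!(1u128.leading_zeros(), 127); // what skipping the cast would give
}
```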
=> { + // These writes have no alignment restriction anyway. + this.memory.write_repeat(ptr, 0, size)?; + Value::ByRef(ptr, aligned) + }, + // TODO(solson): Revisit this, it's fishy to check for Undef here. + Value::ByVal(PrimVal::Undef) => match this.ty_to_primval_kind(dest_ty) { + Ok(_) => Value::ByVal(PrimVal::Bytes(0)), + Err(_) => { + let ptr = this.alloc_ptr_with_substs(dest_ty, substs)?; + let ptr = Pointer::from(PrimVal::Ptr(ptr)); + this.memory.write_repeat(ptr, 0, size)?; + Value::by_ref(ptr) + } + }, + Value::ByVal(_) => Value::ByVal(PrimVal::Bytes(0)), + Value::ByValPair(..) => + Value::ByValPair(PrimVal::Bytes(0), PrimVal::Bytes(0)), + }; + Ok(zero_val) + }; + match dest { + Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?, + Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } => self.memory.write_repeat(ptr, 0, size)?, + Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat or unaligned ptr target"), + Lvalue::Global(cid) => self.modify_global(cid, init)?, + } + } + + "min_align_of" => { + let elem_ty = substs.type_at(0); + let elem_align = self.type_align(elem_ty)?; + let align_val = PrimVal::from_u128(elem_align as u128); + self.write_primval(dest, align_val, dest_ty)?; + } + + "pref_align_of" => { + let ty = substs.type_at(0); + let layout = self.type_layout(ty)?; + let align = layout.align(&self.tcx.data_layout).pref(); + let align_val = PrimVal::from_u128(align as u128); + self.write_primval(dest, align_val, dest_ty)?; + } + + "move_val_init" => { + let ty = substs.type_at(0); + let ptr = arg_vals[0].into_ptr(&mut self.memory)?; + self.write_value_to_ptr(arg_vals[1], ptr, ty)?; + } + + "needs_drop" => { + let ty = substs.type_at(0); + let env = ty::ParamEnv::empty(Reveal::All); + let needs_drop = ty.needs_drop(self.tcx, env); + self.write_primval(dest, PrimVal::from_bool(needs_drop), dest_ty)?; + } + + "offset" => { + let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? 
as i64; + let ptr = arg_vals[0].into_ptr(&mut self.memory)?; + let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?; + self.write_ptr(dest, result_ptr, dest_ty)?; + } + + "overflowing_sub" => { + self.intrinsic_overflowing(mir::BinOp::Sub, &args[0], &args[1], dest, dest_ty)?; + } + + "overflowing_mul" => { + self.intrinsic_overflowing(mir::BinOp::Mul, &args[0], &args[1], dest, dest_ty)?; + } + + "overflowing_add" => { + self.intrinsic_overflowing(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?; + } + + "powf32" => { + let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; + let f2 = self.value_to_primval(arg_vals[1], f32)?.to_f32()?; + self.write_primval(dest, PrimVal::from_f32(f.powf(f2)), dest_ty)?; + } + + "powf64" => { + let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; + let f2 = self.value_to_primval(arg_vals[1], f64)?.to_f64()?; + self.write_primval(dest, PrimVal::from_f64(f.powf(f2)), dest_ty)?; + } + + "fmaf32" => { + let a = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; + let b = self.value_to_primval(arg_vals[1], f32)?.to_f32()?; + let c = self.value_to_primval(arg_vals[2], f32)?.to_f32()?; + self.write_primval(dest, PrimVal::from_f32(a * b + c), dest_ty)?; + } + + "fmaf64" => { + let a = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; + let b = self.value_to_primval(arg_vals[1], f64)?.to_f64()?; + let c = self.value_to_primval(arg_vals[2], f64)?.to_f64()?; + self.write_primval(dest, PrimVal::from_f64(a * b + c), dest_ty)?; + } + + "powif32" => { + let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; + let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?; + self.write_primval(dest, PrimVal::from_f32(f.powi(i as i32)), dest_ty)?; + } + + "powif64" => { + let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; + let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?; + self.write_primval(dest, PrimVal::from_f64(f.powi(i as i32)), dest_ty)?; + } + + "size_of" => { + let ty = substs.type_at(0); + let size = self.type_size(ty)?.expect("size_of intrinsic called on unsized value") as u128; + self.write_primval(dest, PrimVal::from_u128(size), dest_ty)?; + } + + "size_of_val" => { + let ty = substs.type_at(0); + let (size, _) = self.size_and_align_of_dst(ty, arg_vals[0])?; + self.write_primval(dest, PrimVal::from_u128(size as u128), dest_ty)?; + } + + "min_align_of_val" | + "align_of_val" => { + let ty = substs.type_at(0); + let (_, align) = self.size_and_align_of_dst(ty, arg_vals[0])?; + self.write_primval(dest, PrimVal::from_u128(align as u128), dest_ty)?; + } + + "type_name" => { + let ty = substs.type_at(0); + let ty_name = ty.to_string(); + let s = self.str_to_value(&ty_name)?; + self.write_value(s, dest, dest_ty)?; + } + "type_id" => { + let ty = substs.type_at(0); + let n = self.tcx.type_id_hash(ty); + self.write_primval(dest, PrimVal::Bytes(n as u128), dest_ty)?; + } + + "transmute" => { + let src_ty = substs.type_at(0); + let ptr = self.force_allocation(dest)?.to_ptr()?; + self.write_maybe_aligned(/*aligned*/false, |ectx| { + ectx.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty) + })?; + } + + "unchecked_shl" => { + let bits = self.type_size(dest_ty)?.expect("intrinsic can't be called on unsized type") as u128 * 8; + let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?; + if rhs >= bits { + return Err(EvalError::Intrinsic(format!("Overflowing shift by {} in unchecked_shl", rhs))); + } + self.intrinsic_overflowing(mir::BinOp::Shl, &args[0], &args[1], dest, dest_ty)?; + } + + 
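The `unchecked_shl` arm above and the `unchecked_shr` arm that follows guard the same condition: a shift count must stay below the bit width of the shifted type, since for example `1u32 << 32` is undefined behavior, which the interpreter reports instead of evaluating. A standalone version of the guard (the error string is illustrative):

```rust
fn check_unchecked_shift(bits_of_ty: u128, rhs: u128) -> Result<(), String> {
    if rhs >= bits_of_ty {
        return Err(format!("Overflowing shift by {} in unchecked shift", rhs));
    }
    Ok(())
}

fn main() {
    assert!(check_unchecked_shift(32, 31).is_ok());  // 1u32 << 31 is fine
    assert!(check_unchecked_shift(32, 32).is_err()); // 1u32 << 32 is UB
}
```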
"unchecked_shr" => { + let bits = self.type_size(dest_ty)?.expect("intrinsic can't be called on unsized type") as u128 * 8; + let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?; + if rhs >= bits { + return Err(EvalError::Intrinsic(format!("Overflowing shift by {} in unchecked_shr", rhs))); + } + self.intrinsic_overflowing(mir::BinOp::Shr, &args[0], &args[1], dest, dest_ty)?; + } + + "unchecked_div" => { + let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?; + if rhs == 0 { + return Err(EvalError::Intrinsic(format!("Division by 0 in unchecked_div"))); + } + self.intrinsic_overflowing(mir::BinOp::Div, &args[0], &args[1], dest, dest_ty)?; + } + + "unchecked_rem" => { + let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?; + if rhs == 0 { + return Err(EvalError::Intrinsic(format!("Division by 0 in unchecked_rem"))); + } + self.intrinsic_overflowing(mir::BinOp::Rem, &args[0], &args[1], dest, dest_ty)?; + } + + "uninit" => { + let size = dest_layout.size(&self.tcx.data_layout).bytes(); + let uninit = |this: &mut Self, val: Value| { + match val { + Value::ByRef(ptr, aligned) => { + this.memory.mark_definedness(ptr, size, false)?; + Ok(Value::ByRef(ptr, aligned)) + }, + _ => Ok(Value::ByVal(PrimVal::Undef)), + } + }; + match dest { + Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?, + Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } => + self.memory.mark_definedness(ptr, size, false)?, + Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat or unaligned ptr target"), + Lvalue::Global(cid) => self.modify_global(cid, uninit)?, + } + } + + "write_bytes" => { + let u8 = self.tcx.types.u8; + let ty = substs.type_at(0); + let ty_align = self.type_align(ty)?; + let val_byte = self.value_to_primval(arg_vals[1], u8)?.to_u128()? as u8; + let size = self.type_size(ty)?.expect("write_bytes() type must be sized"); + let ptr = arg_vals[0].into_ptr(&mut self.memory)?; + let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?; + if count > 0 { + // TODO: Should we, at least, validate the alignment? (Also see memory::copy) + self.memory.check_align(ptr, ty_align)?; + self.memory.write_repeat(ptr, val_byte, size * count)?; + } + } + + name => return Err(EvalError::Unimplemented(format!("unimplemented intrinsic: {}", name))), + } + + self.goto_block(target); + + // Since we pushed no stack frame, the main loop will act + // as if the call just completed and it's returning to the + // current frame. + Ok(()) + } + + pub fn size_and_align_of_dst( + &mut self, + ty: ty::Ty<'tcx>, + value: Value, + ) -> EvalResult<'tcx, (u64, u64)> { + if let Some(size) = self.type_size(ty)? { + Ok((size as u64, self.type_align(ty)? as u64)) + } else { + match ty.sty { + ty::TyAdt(def, substs) => { + // First get the size of all statically known fields. + // Don't use type_of::sizing_type_of because that expects t to be sized, + // and it also rounds up to alignment, which we want to avoid, + // as the unsized field's alignment could be smaller. + assert!(!ty.is_simd()); + let layout = self.type_layout(ty)?; + debug!("DST {} layout: {:?}", ty, layout); + + let (sized_size, sized_align) = match *layout { + ty::layout::Layout::Univariant { ref variant, .. 
} => { + (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align) + } + _ => { + bug!("size_and_align_of_dst: expected Univariant for `{}`, found {:#?}", + ty, layout); + } + }; + debug!("DST {} statically sized prefix size: {} align: {:?}", + ty, sized_size, sized_align); + + // Recurse to get the size of the dynamically sized field (must be + // the last field). + let last_field = def.struct_variant().fields.last().unwrap(); + let field_ty = self.field_ty(substs, last_field); + let (unsized_size, unsized_align) = self.size_and_align_of_dst(field_ty, value)?; + + // FIXME (#26403, #27023): We should be adding padding + // to `sized_size` (to accommodate the `unsized_align` + // required of the unsized field that follows) before + // summing it with `sized_size`. (Note that since #26403 + // is unfixed, we do not yet add the necessary padding + // here. But this is where the add would go.) + + // Return the sum of sizes and max of aligns. + let size = sized_size + unsized_size; + + // Choose max of two known alignments (combined value must + // be aligned according to more restrictive of the two). + let align = sized_align.max(Align::from_bytes(unsized_align, unsized_align).unwrap()); + + // Issue #27023: must add any necessary padding to `size` + // (to make it a multiple of `align`) before returning it. + // + // Namely, the returned size should be, in C notation: + // + // `size + ((size & (align-1)) ? align : 0)` + // + // emulated via the semi-standard fast bit trick: + // + // `(size + (align-1)) & -align` + + let size = Size::from_bytes(size).abi_align(align).bytes(); + Ok((size, align.abi())) + } + ty::TyDynamic(..) => { + let (_, vtable) = value.into_ptr_vtable_pair(&mut self.memory)?; + // the second entry in the vtable is the dynamic size of the object. + self.read_size_and_align_from_vtable(vtable) + } + + ty::TySlice(_) | ty::TyStr => { + let elem_ty = ty.sequence_element_type(self.tcx); + let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized") as u64; + let (_, len) = value.into_slice(&mut self.memory)?; + let align = self.type_align(elem_ty)?; + Ok((len * elem_size, align as u64)) + } + + _ => bug!("size_of_val::<{:?}>", ty), + } + } + } + /// Returns the normalized type of a struct field + fn field_ty( + &self, + param_substs: &Substs<'tcx>, + f: &ty::FieldDef, + ) -> ty::Ty<'tcx> { + self.tcx.normalize_associated_type(&f.ty(self.tcx, param_substs)) + } +} + +fn numeric_intrinsic<'tcx>( + name: &str, + bytes: u128, + kind: PrimValKind +) -> EvalResult<'tcx, PrimVal> { + macro_rules!
integer_intrinsic { + ($method:ident) => ({ + use miri::value::PrimValKind::*; + let result_bytes = match kind { + I8 => (bytes as i8).$method() as u128, + U8 => (bytes as u8).$method() as u128, + I16 => (bytes as i16).$method() as u128, + U16 => (bytes as u16).$method() as u128, + I32 => (bytes as i32).$method() as u128, + U32 => (bytes as u32).$method() as u128, + I64 => (bytes as i64).$method() as u128, + U64 => (bytes as u64).$method() as u128, + I128 => (bytes as i128).$method() as u128, + U128 => bytes.$method() as u128, + _ => bug!("invalid `{}` argument: {:?}", name, bytes), + }; + + PrimVal::Bytes(result_bytes) + }); + } + + let result_val = match name { + "bswap" => integer_intrinsic!(swap_bytes), + "ctlz" => integer_intrinsic!(leading_zeros), + "ctpop" => integer_intrinsic!(count_ones), + "cttz" => integer_intrinsic!(trailing_zeros), + _ => bug!("not a numeric intrinsic: {}", name), + }; + + Ok(result_val) +} diff --git a/src/librustc_mir/miri/terminator/mod.rs b/src/librustc_mir/miri/terminator/mod.rs new file mode 100644 index 0000000000000..9ff4a877259e9 --- /dev/null +++ b/src/librustc_mir/miri/terminator/mod.rs @@ -0,0 +1,1000 @@ +use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX}; +use rustc::mir; +use rustc::ty::{self, TypeVariants, Ty}; +use rustc::ty::layout::Layout; +use syntax::codemap::Span; +use syntax::attr; +use syntax::abi::Abi; + +use super::error::{EvalError, EvalResult}; +use super::eval_context::{ + EvalContext, IntegerExt, StackPopCleanup, is_inhabited, resolve, + resolve_drop_in_place, apply_param_substs, +}; +use super::lvalue::Lvalue; +use super::memory::{MemoryPointer, TlsKey, Kind}; +use super::value::{PrimVal, Value}; +use rustc_data_structures::indexed_vec::Idx; +use super::const_eval::eval_body_as_primval; + +use std::mem; + +mod drop; +mod intrinsic; + +impl<'a, 'tcx> EvalContext<'a, 'tcx> { + pub(super) fn goto_block(&mut self, target: mir::BasicBlock) { + self.frame_mut().block = target; + self.frame_mut().stmt = 0; + } + + pub(super) fn eval_terminator( + &mut self, + terminator: &mir::Terminator<'tcx>, + ) -> EvalResult<'tcx> { + use rustc::mir::TerminatorKind::*; + match terminator.kind { + Return => { + self.dump_local(self.frame().return_lvalue); + self.pop_stack_frame()? + } + + Goto { target } => self.goto_block(target), + + SwitchInt { ref discr, ref values, ref targets, .. } => { + if self.const_env() { + return Err(EvalError::NeedsRfc("branching (if, match, loop, ...)".to_string())); + } + let discr_val = self.eval_operand(discr)?; + let discr_ty = self.operand_ty(discr); + let discr_prim = self.value_to_primval(discr_val, discr_ty)?; + + // Branch to the `otherwise` case by default, if no match is found. + let mut target_block = targets[targets.len() - 1]; + + for (index, const_int) in values.iter().enumerate() { + let prim = PrimVal::Bytes(const_int.to_u128_unchecked()); + if discr_prim.to_bytes()? == prim.to_bytes()? { + target_block = targets[index]; + break; + } + } + + self.goto_block(target_block); + } + + Call { ref func, ref args, ref destination, .. 
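One detail of `size_and_align_of_dst` above worth spelling out: after summing the sized prefix and the unsized tail, the total is rounded up to the chosen alignment via `abi_align`, which is exactly the bit trick quoted in its comment, `(size + (align - 1)) & -align`. A standalone check:

```rust
// Round `size` up to the next multiple of `align` (a power of two).
// In two's complement, `!(align - 1)` equals `-align`, so this is the
// `(size + (align - 1)) & -align` trick from the comment above.
fn round_up_to_align(size: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two());
    (size + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(round_up_to_align(13, 8), 16); // 3 bytes of tail padding
    assert_eq!(round_up_to_align(16, 8), 16); // already aligned: unchanged
}
```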
} => { + let destination = match *destination { + Some((ref lv, target)) => Some((self.eval_lvalue(lv)?, target)), + None => None, + }; + + let func_ty = self.operand_ty(func); + let (fn_def, sig) = match func_ty.sty { + ty::TyFnPtr(sig) => { + let fn_ptr = self.eval_operand_to_primval(func)?.to_ptr()?; + let instance = self.memory.get_fn(fn_ptr)?; + let instance_ty = instance.def.def_ty(self.tcx); + let instance_ty = self.monomorphize(instance_ty, instance.substs); + match instance_ty.sty { + ty::TyFnDef(..) => { + let real_sig = instance_ty.fn_sig(self.tcx); + let sig = self.erase_lifetimes(&sig); + let real_sig = self.erase_lifetimes(&real_sig); + let real_sig = self.tcx.normalize_associated_type(&real_sig); + if !self.check_sig_compat(sig, real_sig)? { + return Err(EvalError::FunctionPointerTyMismatch(real_sig, sig)); + } + }, + ref other => bug!("instance def ty: {:?}", other), + } + (instance, sig) + }, + ty::TyFnDef(def_id, substs) => (resolve(self.tcx, def_id, substs), func_ty.fn_sig(self.tcx)), + _ => { + let msg = format!("can't handle callee of type {:?}", func_ty); + return Err(EvalError::Unimplemented(msg)); + } + }; + let sig = self.erase_lifetimes(&sig); + self.eval_fn_call(fn_def, destination, args, terminator.source_info.span, sig)?; + } + + Drop { ref location, target, .. } => { + trace!("TerminatorKind::drop: {:?}, {:?}", location, self.substs()); + if self.const_env() { + return Err(EvalError::NeedsRfc("invoking `Drop::drop`".to_string())); + } + let lval = self.eval_lvalue(location)?; + let ty = self.lvalue_ty(location); + self.goto_block(target); + let ty = apply_param_substs(self.tcx, self.substs(), &ty); + + let instance = resolve_drop_in_place(self.tcx, ty); + self.drop_lvalue(lval, instance, ty, terminator.source_info.span)?; + } + + Assert { ref cond, expected, ref msg, target, .. } => { + let cond_val = self.eval_operand_to_primval(cond)?.to_bool()?; + if expected == cond_val { + self.goto_block(target); + } else { + return match *msg { + mir::AssertMessage::BoundsCheck { ref len, ref index } => { + let span = terminator.source_info.span; + let len = self.eval_operand_to_primval(len) + .expect("can't eval len") + .to_u64()?; + let index = self.eval_operand_to_primval(index) + .expect("can't eval index") + .to_u64()?; + Err(EvalError::ArrayIndexOutOfBounds(span, len, index)) + }, + mir::AssertMessage::Math(ref err) => + Err(EvalError::Math(terminator.source_info.span, err.clone())), + } + } + }, + + DropAndReplace { .. } => unimplemented!(), + Resume => unimplemented!(), + Unreachable => return Err(EvalError::Unreachable), + } + + Ok(()) + } + + /// Decides whether it is okay to call the method with signature `real_sig` using signature `sig`. + /// FIXME: This should take into account the platform-dependent ABI description. + fn check_sig_compat( + &mut self, + sig: ty::FnSig<'tcx>, + real_sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool> { + fn check_ty_compat<'tcx>( + ty: ty::Ty<'tcx>, + real_ty: ty::Ty<'tcx>, + ) -> bool { + if ty == real_ty { return true; } // This is actually a fast pointer comparison + return match (&ty.sty, &real_ty.sty) { + // Permit changing the pointer type of raw pointers and references as well as + // mutability of raw pointers. + // TODO: Should not be allowed when fat pointers are involved. 
+ (&TypeVariants::TyRawPtr(_), &TypeVariants::TyRawPtr(_)) => true, + (&TypeVariants::TyRef(_, _), &TypeVariants::TyRef(_, _)) => + ty.is_mutable_pointer() == real_ty.is_mutable_pointer(), + // rule out everything else + _ => false + } + } + + if sig.abi == real_sig.abi && + sig.variadic == real_sig.variadic && + sig.inputs_and_output.len() == real_sig.inputs_and_output.len() && + sig.inputs_and_output.iter().zip(real_sig.inputs_and_output).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) { + // Definitely good. + return Ok(true); + } + + if sig.variadic || real_sig.variadic { + // We're not touching this + return Ok(false); + } + + // We need to allow what comes up when a non-capturing closure is cast to a fn(). + match (sig.abi, real_sig.abi) { + (Abi::Rust, Abi::RustCall) // check the ABIs. This makes the test here non-symmetric. + if check_ty_compat(sig.output(), real_sig.output()) && real_sig.inputs_and_output.len() == 3 => { + // First argument of real_sig must be a ZST + let fst_ty = real_sig.inputs_and_output[0]; + let layout = self.type_layout(fst_ty)?; + let size = layout.size(&self.tcx.data_layout).bytes(); + if size == 0 { + // Second argument must be a tuple matching the argument list of sig + let snd_ty = real_sig.inputs_and_output[1]; + match snd_ty.sty { + TypeVariants::TyTuple(tys, _) if sig.inputs().len() == tys.len() => + if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) { + return Ok(true) + }, + _ => {} + } + } + } + _ => {} + }; + + // Nope, this doesn't work. + return Ok(false); + } + + fn eval_fn_call( + &mut self, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>, + arg_operands: &[mir::Operand<'tcx>], + span: Span, + sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx> { + trace!("eval_fn_call: {:#?}", instance); + match instance.def { + ty::InstanceDef::Intrinsic(..) => { + let (ret, target) = match destination { + Some(dest) => dest, + _ => return Err(EvalError::Unreachable), + }; + let ty = sig.output(); + if !is_inhabited(self.tcx, ty) { + return Err(EvalError::Unreachable); + } + let layout = self.type_layout(ty)?; + self.call_intrinsic(instance, arg_operands, ret, ty, layout, target)?; + self.dump_local(ret); + Ok(()) + }, + ty::InstanceDef::ClosureOnceShim{..} => { + let mut args = Vec::new(); + for arg in arg_operands { + let arg_val = self.eval_operand(arg)?; + let arg_ty = self.operand_ty(arg); + args.push((arg_val, arg_ty)); + } + if self.eval_fn_call_inner( + instance, + destination, + arg_operands, + span, + sig, + )? 
{ + return Ok(()); + } + let mut arg_locals = self.frame().mir.args_iter(); + match sig.abi { + // closure as closure once + Abi::RustCall => { + for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) { + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + self.write_value(arg_val, dest, arg_ty)?; + } + }, + // non-capturing closure as fn ptr + // need to inject zst ptr for closure object (aka do nothing) + // and need to pack arguments + Abi::Rust => { + trace!("arg_locals: {:?}", self.frame().mir.args_iter().collect::<Vec<_>>()); + trace!("arg_operands: {:?}", arg_operands); + let local = arg_locals.nth(1).unwrap(); + for (i, (arg_val, arg_ty)) in args.into_iter().enumerate() { + let dest = self.eval_lvalue(&mir::Lvalue::Local(local).field(mir::Field::new(i), arg_ty))?; + self.write_value(arg_val, dest, arg_ty)?; + } + }, + _ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi), + } + Ok(()) + } + ty::InstanceDef::Item(_) => { + let mut args = Vec::new(); + for arg in arg_operands { + let arg_val = self.eval_operand(arg)?; + let arg_ty = self.operand_ty(arg); + args.push((arg_val, arg_ty)); + } + + // Push the stack frame, and potentially be entirely done if the call got hooked + if self.eval_fn_call_inner( + instance, + destination, + arg_operands, + span, + sig, + )? { + return Ok(()); + } + + // Pass the arguments + let mut arg_locals = self.frame().mir.args_iter(); + trace!("ABI: {:?}", sig.abi); + trace!("arg_locals: {:?}", self.frame().mir.args_iter().collect::<Vec<_>>()); + trace!("arg_operands: {:?}", arg_operands); + match sig.abi { + Abi::RustCall => { + assert_eq!(args.len(), 2); + + { // write first argument + let first_local = arg_locals.next().unwrap(); + let dest = self.eval_lvalue(&mir::Lvalue::Local(first_local))?; + let (arg_val, arg_ty) = args.remove(0); + self.write_value(arg_val, dest, arg_ty)?; + } + + // unpack and write all other args + let (arg_val, arg_ty) = args.remove(0); + let layout = self.type_layout(arg_ty)?; + if let (&ty::TyTuple(fields, _), &Layout::Univariant { ref variant, .. }) = (&arg_ty.sty, layout) { + trace!("fields: {:?}", fields); + if self.frame().mir.args_iter().count() == fields.len() + 1 { + let offsets = variant.offsets.iter().map(|s| s.bytes()); + match arg_val { + Value::ByRef(ptr, aligned) => { + assert!(aligned, "Unaligned ByRef-values cannot occur as function arguments"); + for ((offset, ty), arg_local) in offsets.zip(fields).zip(arg_locals) { + let arg = Value::ByRef(ptr.offset(offset, self.memory.layout)?, true); + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + trace!("writing arg {:?} to {:?} (type: {})", arg, dest, ty); + self.write_value(arg, dest, ty)?; + } + }, + Value::ByVal(PrimVal::Undef) => {}, + other => { + assert_eq!(fields.len(), 1); + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_locals.next().unwrap()))?; + self.write_value(other, dest, fields[0])?; + } + } + } else { + trace!("manual impl of rust-call ABI"); + // called a manual impl of a rust-call function + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_locals.next().unwrap()))?; + self.write_value(arg_val, dest, arg_ty)?; + } + } else { + bug!("rust-call ABI tuple argument was {:?}, {:?}", arg_ty, layout); + } + }, + _ => { + for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) { + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + self.write_value(arg_val, dest, arg_ty)?; + } + } + } + Ok(()) + }, + ty::InstanceDef::DropGlue(..)
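The `Abi::RustCall` handling above is all about untupling: the caller passes the callee (or its environment) plus a single tuple, and the interpreter spreads the tuple's fields into the remaining argument locals using the tuple layout's field offsets. In miniature, with plain values standing in for MIR locals (names are illustrative):

```rust
// Spread a "rust-call" style argument list: one self/env value followed by
// one packed tuple, flattened into per-argument slots like the
// offsets.zip(fields).zip(arg_locals) loop above does with real memory.
fn spread_rust_call_args(env: u64, packed: (u64, u64)) -> Vec<u64> {
    vec![env, packed.0, packed.1]
}

fn main() {
    // a callee expecting (env, a, b) receives the tuple's fields unpacked
    assert_eq!(spread_rust_call_args(7, (1, 2)), vec![7, 1, 2]);
}
```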
=> { + assert_eq!(arg_operands.len(), 1); + assert_eq!(sig.abi, Abi::Rust); + let val = self.eval_operand(&arg_operands[0])?; + let ty = self.operand_ty(&arg_operands[0]); + let (_, target) = destination.expect("diverging drop glue"); + self.goto_block(target); + // FIXME: deduplicate these matches + let pointee_type = match ty.sty { + ty::TyRawPtr(ref tam) | + ty::TyRef(_, ref tam) => tam.ty, + ty::TyAdt(def, _) if def.is_box() => ty.boxed_ty(), + _ => bug!("can only deref pointer types"), + }; + self.drop(val, instance, pointee_type, span) + }, + ty::InstanceDef::FnPtrShim(..) => { + trace!("ABI: {}", sig.abi); + let mut args = Vec::new(); + for arg in arg_operands { + let arg_val = self.eval_operand(arg)?; + let arg_ty = self.operand_ty(arg); + args.push((arg_val, arg_ty)); + } + if self.eval_fn_call_inner( + instance, + destination, + arg_operands, + span, + sig, + )? { + return Ok(()); + } + let arg_locals = self.frame().mir.args_iter(); + match sig.abi { + Abi::Rust => { + args.remove(0); + }, + Abi::RustCall => {}, + _ => unimplemented!(), + }; + for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) { + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + self.write_value(arg_val, dest, arg_ty)?; + } + Ok(()) + }, + ty::InstanceDef::Virtual(_, idx) => { + let ptr_size = self.memory.pointer_size(); + let (_, vtable) = self.eval_operand(&arg_operands[0])?.into_ptr_vtable_pair(&mut self.memory)?; + let fn_ptr = self.memory.read_ptr(vtable.offset(ptr_size * (idx as u64 + 3), self.memory.layout)?)?; + let instance = self.memory.get_fn(fn_ptr.to_ptr()?)?; + let mut arg_operands = arg_operands.to_vec(); + let ty = self.operand_ty(&arg_operands[0]); + let ty = self.get_field_ty(ty, 0)?; + match arg_operands[0] { + mir::Operand::Consume(ref mut lval) => *lval = lval.clone().field(mir::Field::new(0), ty), + _ => bug!("virtual call first arg cannot be a constant"), + } + // recurse with concrete function + self.eval_fn_call( + instance, + destination, + &arg_operands, + span, + sig, + ) + }, + } + } + + /// Returns Ok(true) when the function was handled completely due to mir not being available + fn eval_fn_call_inner( + &mut self, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>, + arg_operands: &[mir::Operand<'tcx>], + span: Span, + sig: ty::FnSig<'tcx>, + ) -> EvalResult<'tcx, bool> { + trace!("eval_fn_call_inner: {:#?}, {:#?}", instance, destination); + + // Only trait methods can have a Self parameter. 
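The `Virtual` arm above hard-codes the vtable layout the interpreter relies on elsewhere: slot 0 holds `drop_in_place` (read by `read_drop_type_from_vtable`), slots 1 and 2 hold size and alignment (hence the "second entry is the dynamic size" comment earlier), and trait methods start at slot 3, which is why the method pointer is read at `ptr_size * (idx + 3)`. A sketch of that offset computation:

```rust
// A trait-object vtable, as assumed here: [drop, size, align, method 0, ...]
fn vtable_slot_offset(ptr_size: u64, method_idx: u64) -> u64 {
    ptr_size * (method_idx + 3)
}

fn main() {
    // on a 64-bit target the first method sits 24 bytes into the vtable
    assert_eq!(vtable_slot_offset(8, 0), 24);
}
```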
+ + let mir = match self.load_mir(instance.def) { + Ok(mir) => mir, + Err(EvalError::NoMirFor(path)) => { + if self.const_env() { + return Err(EvalError::NeedsRfc(format!("calling extern function `{}`", path))); + } + self.call_missing_fn(instance, destination, arg_operands, sig, path)?; + return Ok(true); + }, + Err(other) => return Err(other), + }; + + if self.const_env() && !self.tcx.is_const_fn(instance.def_id()) { + return Err(EvalError::NotConst(format!("calling non-const fn `{}`", instance))); + } + + let (return_lvalue, return_to_block) = match destination { + Some((lvalue, block)) => (lvalue, StackPopCleanup::Goto(block)), + None => (Lvalue::undef(), StackPopCleanup::None), + }; + + self.push_stack_frame( + instance, + span, + mir, + return_lvalue, + return_to_block, + )?; + + Ok(false) + } + + pub fn read_discriminant_value(&self, adt_ptr: MemoryPointer, adt_ty: Ty<'tcx>) -> EvalResult<'tcx, u128> { + use rustc::ty::layout::Layout::*; + let adt_layout = self.type_layout(adt_ty)?; + trace!("read_discriminant_value {:#?}", adt_layout); + + let discr_val = match *adt_layout { + General { discr, .. } | CEnum { discr, signed: false, .. } => { + let discr_size = discr.size().bytes(); + self.memory.read_uint(adt_ptr, discr_size)? + } + + CEnum { discr, signed: true, .. } => { + let discr_size = discr.size().bytes(); + self.memory.read_int(adt_ptr, discr_size)? as u128 + } + + RawNullablePointer { nndiscr, value } => { + let discr_size = value.size(&self.tcx.data_layout).bytes(); + trace!("rawnullablepointer with size {}", discr_size); + self.read_nonnull_discriminant_value(adt_ptr, nndiscr as u128, discr_size)? + } + + StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { + let (offset, ty) = self.nonnull_offset_and_ty(adt_ty, nndiscr, discrfield)?; + let nonnull = adt_ptr.offset(offset.bytes(), self.memory.layout)?; + trace!("struct wrapped nullable pointer type: {}", ty); + // only the pointer part of a fat pointer is used for this space optimization + let discr_size = self.type_size(ty)?.expect("bad StructWrappedNullablePointer discrfield"); + self.read_nonnull_discriminant_value(nonnull, nndiscr as u128, discr_size)? + } + + // The discriminant_value intrinsic returns 0 for non-sum types. + Array { .. } | FatPointer { .. } | Scalar { .. } | Univariant { .. } | + Vector { .. } | UntaggedUnion { .. } => 0, + }; + + Ok(discr_val) + } + + fn read_nonnull_discriminant_value(&self, ptr: MemoryPointer, nndiscr: u128, discr_size: u64) -> EvalResult<'tcx, u128> { + trace!("read_nonnull_discriminant_value: {:?}, {}, {}", ptr, nndiscr, discr_size); + let not_null = match self.memory.read_uint(ptr, discr_size) { + Ok(0) => false, + Ok(_) | Err(EvalError::ReadPointerAsBytes) => true, + Err(e) => return Err(e), + }; + assert!(nndiscr == 0 || nndiscr == 1); + Ok(if not_null { nndiscr } else { 1 - nndiscr }) + } + + /// Returns Ok() when the function was handled, fail otherwise + fn call_missing_fn( + &mut self, + instance: ty::Instance<'tcx>, + destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>, + arg_operands: &[mir::Operand<'tcx>], + sig: ty::FnSig<'tcx>, + path: String, + ) -> EvalResult<'tcx> { + // In some cases in non-MIR libstd-mode, not having a destination is legit. Handle these early. + match &path[..] 
{ + "std::panicking::rust_panic_with_hook" | + "std::rt::begin_panic_fmt" => return Err(EvalError::Panic), + _ => {}, + } + + let dest_ty = sig.output(); + let (dest, dest_block) = destination.ok_or_else(|| EvalError::NoMirFor(path.clone()))?; + + if sig.abi == Abi::C { + // An external C function + // TODO: That function actually has a similar preamble to what follows here. May make sense to + // unify these two mechanisms for "hooking into missing functions". + self.call_c_abi(instance.def_id(), arg_operands, dest, dest_ty, dest_block)?; + return Ok(()); + } + + let args_res: EvalResult<'tcx, Vec<Value>> = arg_operands.iter() + .map(|arg| self.eval_operand(arg)) + .collect(); + let args = args_res?; + + let usize = self.tcx.types.usize; + + match &path[..] { + // Allocators are magic. They have no MIR, even when the rest of libstd does. + "alloc::heap::::__rust_alloc" => { + let size = self.value_to_primval(args[0], usize)?.to_u64()?; + let align = self.value_to_primval(args[1], usize)?.to_u64()?; + if size == 0 { + return Err(EvalError::HeapAllocZeroBytes); + } + if !align.is_power_of_two() { + return Err(EvalError::HeapAllocNonPowerOfTwoAlignment(align)); + } + let ptr = self.memory.allocate(size, align, Kind::Rust)?; + self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?; + } + "alloc::heap::::__rust_alloc_zeroed" => { + let size = self.value_to_primval(args[0], usize)?.to_u64()?; + let align = self.value_to_primval(args[1], usize)?.to_u64()?; + if size == 0 { + return Err(EvalError::HeapAllocZeroBytes); + } + if !align.is_power_of_two() { + return Err(EvalError::HeapAllocNonPowerOfTwoAlignment(align)); + } + let ptr = self.memory.allocate(size, align, Kind::Rust)?; + self.memory.write_repeat(ptr.into(), 0, size)?; + self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?; + } + "alloc::heap::::__rust_dealloc" => { + let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?; + let old_size = self.value_to_primval(args[1], usize)?.to_u64()?; + let align = self.value_to_primval(args[2], usize)?.to_u64()?; + if old_size == 0 { + return Err(EvalError::HeapAllocZeroBytes); + } + if !align.is_power_of_two() { + return Err(EvalError::HeapAllocNonPowerOfTwoAlignment(align)); + } + self.memory.deallocate(ptr, Some((old_size, align)), Kind::Rust)?; + } + "alloc::heap::::__rust_realloc" => { + let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?; + let old_size = self.value_to_primval(args[1], usize)?.to_u64()?; + let old_align = self.value_to_primval(args[2], usize)?.to_u64()?; + let new_size = self.value_to_primval(args[3], usize)?.to_u64()?; + let new_align = self.value_to_primval(args[4], usize)?.to_u64()?; + if old_size == 0 || new_size == 0 { + return Err(EvalError::HeapAllocZeroBytes); + } + if !old_align.is_power_of_two() { + return Err(EvalError::HeapAllocNonPowerOfTwoAlignment(old_align)); + } + if !new_align.is_power_of_two() { + return Err(EvalError::HeapAllocNonPowerOfTwoAlignment(new_align)); + } + let new_ptr = self.memory.reallocate(ptr, old_size, old_align, new_size, new_align, Kind::Rust)?; + self.write_primval(dest, PrimVal::Ptr(new_ptr), dest_ty)?; + } + + // A Rust function is missing, which means we are running with MIR missing for libstd (or other dependencies). + // Still, we can make many things mostly work by "emulating" or ignoring some functions. + "std::io::_print" => { + trace!("Ignoring output.
To run programs that print, make sure you have a libstd with full MIR."); + } + "std::thread::Builder::new" => return Err(EvalError::Unimplemented("miri does not support threading".to_owned())), + "std::env::args" => return Err(EvalError::Unimplemented("miri does not support program arguments".to_owned())), + "std::panicking::panicking" | + "std::rt::panicking" => { + // we abort on panic -> `std::rt::panicking` always returns false + let bool = self.tcx.types.bool; + self.write_primval(dest, PrimVal::from_bool(false), bool)?; + } + _ => return Err(EvalError::NoMirFor(path)), + } + + // Since we pushed no stack frame, the main loop will act + // as if the call just completed and it's returning to the + // current frame. + self.dump_local(dest); + self.goto_block(dest_block); + return Ok(()); + } + + fn call_c_abi( + &mut self, + def_id: DefId, + arg_operands: &[mir::Operand<'tcx>], + dest: Lvalue<'tcx>, + dest_ty: Ty<'tcx>, + dest_block: mir::BasicBlock, + ) -> EvalResult<'tcx> { + let name = self.tcx.item_name(def_id); + let attrs = self.tcx.get_attrs(def_id); + let link_name = attr::first_attr_value_str_by_name(&attrs, "link_name") + .unwrap_or(name) + .as_str(); + + let args_res: EvalResult<'tcx, Vec<Value>> = arg_operands.iter() + .map(|arg| self.eval_operand(arg)) + .collect(); + let args = args_res?; + + let usize = self.tcx.types.usize; + + match &link_name[..] { + "malloc" => { + let size = self.value_to_primval(args[0], usize)?.to_u64()?; + if size == 0 { + self.write_null(dest, dest_ty)?; + } else { + let align = self.memory.pointer_size(); + let ptr = self.memory.allocate(size, align, Kind::C)?; + self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?; + } + } + + "free" => { + let ptr = args[0].into_ptr(&mut self.memory)?; + if !ptr.is_null()? { + self.memory.deallocate(ptr.to_ptr()?, None, Kind::C)?; + } + } + + "syscall" => { + match self.value_to_primval(args[0], usize)?.to_u64()? { + 511 => return Err(EvalError::Unimplemented("miri does not support random number generators".to_owned())), + id => return Err(EvalError::Unimplemented(format!("miri does not support syscall id {}", id))), + } + } + + "dlsym" => { + let _handle = args[0].into_ptr(&mut self.memory)?; + let symbol = args[1].into_ptr(&mut self.memory)?.to_ptr()?; + let symbol_name = self.memory.read_c_str(symbol)?; + let err = format!("bad c unicode symbol: {:?}", symbol_name); + let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err); + return Err(EvalError::Unimplemented(format!("miri does not support dynamically loading libraries (requested symbol: {})", symbol_name))); + } + + "__rust_maybe_catch_panic" => { + // fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32 + // We abort on panic, so not much is going on here, but we still have to call the closure + let u8_ptr_ty = self.tcx.mk_mut_ptr(self.tcx.types.u8); + let f = args[0].into_ptr(&mut self.memory)?.to_ptr()?; + let data = args[1].into_ptr(&mut self.memory)?; + let f_instance = self.memory.get_fn(f)?; + self.write_null(dest, dest_ty)?; + + // Now we make a function call. TODO: Consider making this re-usable? EvalContext::step does something similar for the TLS dtors, + // and of course eval_main.
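The allocator shims above all validate the request the same way before touching interpreter memory, mirroring `EvalError::HeapAllocZeroBytes` and `HeapAllocNonPowerOfTwoAlignment`. The same predicate in isolation (error strings here are illustrative, not miri's):

```rust
fn validate_alloc_request(size: u64, align: u64) -> Result<(), String> {
    if size == 0 {
        return Err("heap allocation of zero bytes".to_string());
    }
    if !align.is_power_of_two() {
        return Err(format!("non-power-of-two alignment: {}", align));
    }
    Ok(())
}

fn main() {
    assert!(validate_alloc_request(16, 8).is_ok());
    assert!(validate_alloc_request(0, 8).is_err());  // zero-sized
    assert!(validate_alloc_request(16, 3).is_err()); // bad alignment
}
```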
+ let mir = self.load_mir(f_instance.def)?; + self.push_stack_frame( + f_instance, + mir.span, + mir, + Lvalue::undef(), + StackPopCleanup::Goto(dest_block), + )?; + + let arg_local = self.frame().mir.args_iter().next().ok_or(EvalError::AbiViolation("Argument to __rust_maybe_catch_panic does not take enough arguments.".to_owned()))?; + let arg_dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + self.write_ptr(arg_dest, data, u8_ptr_ty)?; + + // We ourselves return 0 + self.write_null(dest, dest_ty)?; + + // Don't fall through + return Ok(()); + } + + "__rust_start_panic" => { + return Err(EvalError::Panic); + } + + "memcmp" => { + let left = args[0].into_ptr(&mut self.memory)?; + let right = args[1].into_ptr(&mut self.memory)?; + let n = self.value_to_primval(args[2], usize)?.to_u64()?; + + let result = { + let left_bytes = self.memory.read_bytes(left, n)?; + let right_bytes = self.memory.read_bytes(right, n)?; + + use std::cmp::Ordering::*; + match left_bytes.cmp(right_bytes) { + Less => -1i8, + Equal => 0, + Greater => 1, + } + }; + + self.write_primval(dest, PrimVal::Bytes(result as u128), dest_ty)?; + } + + "memrchr" => { + let ptr = args[0].into_ptr(&mut self.memory)?; + let val = self.value_to_primval(args[1], usize)?.to_u64()? as u8; + let num = self.value_to_primval(args[2], usize)?.to_u64()?; + if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().rev().position(|&c| c == val) { + let new_ptr = ptr.offset(num - idx as u64 - 1, self.memory.layout)?; + self.write_ptr(dest, new_ptr, dest_ty)?; + } else { + self.write_null(dest, dest_ty)?; + } + } + + "memchr" => { + let ptr = args[0].into_ptr(&mut self.memory)?; + let val = self.value_to_primval(args[1], usize)?.to_u64()? as u8; + let num = self.value_to_primval(args[2], usize)?.to_u64()?; + if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().position(|&c| c == val) { + let new_ptr = ptr.offset(idx as u64, self.memory.layout)?; + self.write_ptr(dest, new_ptr, dest_ty)?; + } else { + self.write_null(dest, dest_ty)?; + } + } + + "getenv" => { + let result = { + let name_ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?; + let name = self.memory.read_c_str(name_ptr)?; + match self.env_vars.get(name) { + Some(&var) => PrimVal::Ptr(var), + None => PrimVal::Bytes(0), + } + }; + self.write_primval(dest, result, dest_ty)?; + } + + "unsetenv" => { + let mut success = None; + { + let name_ptr = args[0].into_ptr(&mut self.memory)?; + if !name_ptr.is_null()? { + let name = self.memory.read_c_str(name_ptr.to_ptr()?)?; + if !name.is_empty() && !name.contains(&b'=') { + success = Some(self.env_vars.remove(name)); + } + } + } + if let Some(old) = success { + if let Some(var) = old { + self.memory.deallocate(var, None, Kind::Env)?; + } + self.write_null(dest, dest_ty)?; + } else { + self.write_primval(dest, PrimVal::from_i128(-1), dest_ty)?; + } + } + + "setenv" => { + let mut new = None; + { + let name_ptr = args[0].into_ptr(&mut self.memory)?; + let value_ptr = args[1].into_ptr(&mut self.memory)?.to_ptr()?; + let value = self.memory.read_c_str(value_ptr)?; + if !name_ptr.is_null()? 
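The `memrchr` shim above searches from the end of the byte range and then converts the reverse index back into a forward offset with `num - idx - 1`. The same logic over a plain slice, with a quick check of the index math:

```rust
fn memrchr(haystack: &[u8], needle: u8) -> Option<usize> {
    haystack.iter().rev().position(|&c| c == needle)
        .map(|idx| haystack.len() - idx - 1) // reverse index -> forward offset
}

fn main() {
    assert_eq!(memrchr(b"abca", b'a'), Some(3)); // last match wins
    assert_eq!(memrchr(b"abc", b'z'), None);
}
```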
{ + let name = self.memory.read_c_str(name_ptr.to_ptr()?)?; + if !name.is_empty() && !name.contains(&b'=') { + new = Some((name.to_owned(), value.to_owned())); + } + } + } + if let Some((name, value)) = new { + // +1 for the null terminator + let value_copy = self.memory.allocate((value.len() + 1) as u64, 1, Kind::Env)?; + self.memory.write_bytes(PrimVal::Ptr(value_copy), &value)?; + self.memory.write_bytes(PrimVal::Ptr(value_copy.offset(value.len() as u64, self.memory.layout)?), &[0])?; + if let Some(var) = self.env_vars.insert(name.to_owned(), value_copy) { + self.memory.deallocate(var, None, Kind::Env)?; + } + self.write_null(dest, dest_ty)?; + } else { + self.write_primval(dest, PrimVal::from_i128(-1), dest_ty)?; + } + } + + "write" => { + let fd = self.value_to_primval(args[0], usize)?.to_u64()?; + let buf = args[1].into_ptr(&mut self.memory)?; + let n = self.value_to_primval(args[2], usize)?.to_u64()?; + trace!("Called write({:?}, {:?}, {:?})", fd, buf, n); + let result = if fd == 1 || fd == 2 { // stdout/stderr + use std::io::{self, Write}; + + let buf_cont = self.memory.read_bytes(buf, n)?; + let res = if fd == 1 { io::stdout().write(buf_cont) } else { io::stderr().write(buf_cont) }; + match res { Ok(n) => n as isize, Err(_) => -1 } + } else { + info!("Ignored output to FD {}", fd); + n as isize // pretend it all went well + }; // now result is the value we return back to the program + self.write_primval(dest, PrimVal::Bytes(result as u128), dest_ty)?; + } + + "strlen" => { + let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?; + let n = self.memory.read_c_str(ptr)?.len(); + self.write_primval(dest, PrimVal::Bytes(n as u128), dest_ty)?; + } + + // Some things needed for sys::thread initialization to go through + "signal" | "sigaction" | "sigaltstack" => { + self.write_primval(dest, PrimVal::Bytes(0), dest_ty)?; + } + + "sysconf" => { + let name = self.value_to_primval(args[0], usize)?.to_u64()?; + trace!("sysconf() called with name {}", name); + // cache the sysconf integers via miri's global cache + let paths = &[ + (&["libc", "_SC_PAGESIZE"], PrimVal::Bytes(4096)), + (&["libc", "_SC_GETPW_R_SIZE_MAX"], PrimVal::from_i128(-1)), + ]; + let mut result = None; + for &(path, path_value) in paths { + if let Ok(instance) = self.resolve_path(path) { + use super::lvalue::GlobalId; + let cid = GlobalId { instance, promoted: None }; + // compute global if not cached + let val = match self.globals.get(&cid).map(|glob| glob.value) { + Some(value) => self.value_to_primval(value, usize)?.to_u64()?, + None => eval_body_as_primval(self.tcx, instance)?.0.to_u64()?, + }; + if val == name { + result = Some(path_value); + break; + } + } + } + if let Some(result) = result { + self.write_primval(dest, result, dest_ty)?; + } else { + return Err(EvalError::Unimplemented(format!("Unimplemented sysconf name: {}", name))); + } + } + + // Hook pthread calls that go to the thread-local storage memory subsystem + "pthread_key_create" => { + let key_ptr = args[0].into_ptr(&mut self.memory)?; + + // Extract the function type out of the signature (that seems easier than constructing it ourselves...) + let dtor = match args[1].into_ptr(&mut self.memory)?.into_inner_primval() { + PrimVal::Ptr(dtor_ptr) => Some(self.memory.get_fn(dtor_ptr)?), + PrimVal::Bytes(0) => None, + PrimVal::Bytes(_) => return Err(EvalError::ReadBytesAsPointer), + PrimVal::Undef => return Err(EvalError::ReadUndefBytes), + }; + + // Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t. 
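+ // Rather than hard-coding a width, we deref the first argument's pointer type and ask layout for its size; the freshly created key must then fit into that many bits (checked below).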
+ let key_type = self.operand_ty(&arg_operands[0]).builtin_deref(true, ty::LvaluePreference::NoPreference) + .ok_or(EvalError::AbiViolation("Wrong signature used for pthread_key_create: First argument must be a raw pointer.".to_owned()))?.ty; + let key_size = { + let layout = self.type_layout(key_type)?; + layout.size(&self.tcx.data_layout) + }; + + // Create key and write it into the memory where key_ptr wants it + let key = self.memory.create_tls_key(dtor) as u128; + if key_size.bits() < 128 && key >= (1u128 << key_size.bits() as u128) { + return Err(EvalError::OutOfTls); + } + // TODO: Does this need checking for alignment? + self.memory.write_uint(key_ptr.to_ptr()?, key, key_size.bytes())?; + + // Return success (0) + self.write_null(dest, dest_ty)?; + } + "pthread_key_delete" => { + // The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t + let key = self.value_to_primval(args[0], usize)?.to_u64()? as TlsKey; + self.memory.delete_tls_key(key)?; + // Return success (0) + self.write_null(dest, dest_ty)?; + } + "pthread_getspecific" => { + // The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t + let key = self.value_to_primval(args[0], usize)?.to_u64()? as TlsKey; + let ptr = self.memory.load_tls(key)?; + self.write_ptr(dest, ptr, dest_ty)?; + } + "pthread_setspecific" => { + // The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t + let key = self.value_to_primval(args[0], usize)?.to_u64()? as TlsKey; + let new_ptr = args[1].into_ptr(&mut self.memory)?; + self.memory.store_tls(key, new_ptr)?; + + // Return success (0) + self.write_null(dest, dest_ty)?; + } + + // Stub out all the other pthread calls to just return 0 + link_name if link_name.starts_with("pthread_") => { + warn!("ignoring C ABI call: {}", link_name); + self.write_null(dest, dest_ty)?; + }, + + _ => { + return Err(EvalError::Unimplemented(format!("can't call C ABI function: {}", link_name))); + } + } + + // Since we pushed no stack frame, the main loop will act + // as if the call just completed and it's returning to the + // current frame. + self.dump_local(dest); + self.goto_block(dest_block); + Ok(()) + } + + /// Get an instance for a path. 
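+ /// For example, the `sysconf` shim above resolves `&["libc", "_SC_PAGESIZE"]` this way; the lookup walks the crate metadata, so only items reachable through `item_children` can be found.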
+ fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>> { + let cstore = &self.tcx.sess.cstore; + + let crates = cstore.crates(); + crates.iter() + .find(|&&krate| cstore.crate_name(krate) == path[0]) + .and_then(|krate| { + let krate = DefId { + krate: *krate, + index: CRATE_DEF_INDEX, + }; + let mut items = cstore.item_children(krate, self.tcx.sess); + let mut path_it = path.iter().skip(1).peekable(); + + while let Some(segment) = path_it.next() { + for item in &mem::replace(&mut items, vec![]) { + if item.ident.name == *segment { + if path_it.peek().is_none() { + return Some(ty::Instance::mono(self.tcx, item.def.def_id())); + } + + items = cstore.item_children(item.def.def_id(), self.tcx.sess); + break; + } + } + } + None + }) + .ok_or_else(|| { + let path = path.iter() + .map(|&s| s.to_owned()) + .collect(); + EvalError::PathNotFound(path) + }) + } +} diff --git a/src/librustc_mir/miri/traits.rs b/src/librustc_mir/miri/traits.rs new file mode 100644 index 0000000000000..51937957edee2 --- /dev/null +++ b/src/librustc_mir/miri/traits.rs @@ -0,0 +1,114 @@ +use rustc::traits::{self, Reveal}; + +use super::eval_context::{ + EvalContext, resolve, resolve_drop_in_place, +}; +use super::memory::{MemoryPointer, Kind}; +use super::value::{Value, PrimVal}; + +use rustc::hir::def_id::DefId; +use rustc::ty::subst::Substs; +use rustc::ty::{self, Ty}; +use syntax::codemap::DUMMY_SP; +use syntax::ast::{self, Mutability}; + +use super::error::{EvalResult, EvalError}; + +impl<'a, 'tcx> EvalContext<'a, 'tcx> { + + pub(crate) fn fulfill_obligation(&self, trait_ref: ty::PolyTraitRef<'tcx>) -> traits::Vtable<'tcx, ()> { + // Do the initial selection for the obligation. This yields the shallow result we are + // looking for -- that is, what specific impl. + self.tcx.infer_ctxt().enter(|infcx| { + let mut selcx = traits::SelectionContext::new(&infcx); + + let obligation = traits::Obligation::new( + traits::ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID), + ty::ParamEnv::empty(Reveal::All), + trait_ref.to_poly_trait_predicate(), + ); + let selection = selcx.select(&obligation).unwrap().unwrap(); + + // Currently, we use a fulfillment context to completely resolve all nested obligations. + // This is because they can inform the inference of the impl's type parameters. + let mut fulfill_cx = traits::FulfillmentContext::new(); + let vtable = selection.map(|predicate| { + fulfill_cx.register_predicate_obligation(&infcx, predicate); + }); + infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &vtable) + }) + } + + /// Creates a dynamic vtable for the given type and vtable origin. This is used only for + /// objects. + /// + /// The `trait_ref` encodes the erased self type. Hence if we are + /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then + /// `trait_ref` would map `T:Trait`.
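+ /// The layout written below is the usual one: slot 0 holds `drop_in_place`, slots 1 and 2 hold the size and alignment of the concrete type, and each remaining slot holds one trait method, one pointer each.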
+ pub fn get_vtable(&mut self, ty: Ty<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>) -> EvalResult<'tcx, MemoryPointer> { + debug!("get_vtable(trait_ref={:?})", trait_ref); + + let size = self.type_size(trait_ref.self_ty())?.expect("can't create a vtable for an unsized type"); + let align = self.type_align(trait_ref.self_ty())?; + + let ptr_size = self.memory.pointer_size(); + let methods = ::rustc::traits::get_vtable_methods(self.tcx, trait_ref); + let vtable = self.memory.allocate(ptr_size * (3 + methods.count() as u64), ptr_size, Kind::UninitializedStatic)?; + + let drop = resolve_drop_in_place(self.tcx, ty); + let drop = self.memory.create_fn_alloc(drop); + self.memory.write_ptr(vtable, drop)?; + + self.memory.write_usize(vtable.offset(ptr_size, self.memory.layout)?, size)?; + self.memory.write_usize(vtable.offset(ptr_size * 2, self.memory.layout)?, align)?; + + for (i, method) in ::rustc::traits::get_vtable_methods(self.tcx, trait_ref).enumerate() { + if let Some((def_id, substs)) = method { + let instance = resolve(self.tcx, def_id, substs); + let fn_ptr = self.memory.create_fn_alloc(instance); + self.memory.write_ptr(vtable.offset(ptr_size * (3 + i as u64), self.memory.layout)?, fn_ptr)?; + } + } + + self.memory.mark_static_initalized(vtable.alloc_id, Mutability::Mutable)?; + + Ok(vtable) + } + + pub fn read_drop_type_from_vtable(&self, vtable: MemoryPointer) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> { + // we don't care about the pointee type, we just want a pointer + match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? { + // some values don't need to call a drop impl, so the value is null + Value::ByVal(PrimVal::Bytes(0)) => Ok(None), + Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some), + _ => Err(EvalError::ReadBytesAsPointer), + } + } + + pub fn read_size_and_align_from_vtable(&self, vtable: MemoryPointer) -> EvalResult<'tcx, (u64, u64)> { + let pointer_size = self.memory.pointer_size(); + let size = self.memory.read_usize(vtable.offset(pointer_size, self.memory.layout)?)?; + let align = self.memory.read_usize(vtable.offset(pointer_size * 2, self.memory.layout)?)?; + Ok((size, align)) + } + + pub(crate) fn resolve_associated_const( + &self, + def_id: DefId, + substs: &'tcx Substs<'tcx>, + ) -> ty::Instance<'tcx> { + if let Some(trait_id) = self.tcx.trait_of_item(def_id) { + let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, substs)); + let vtable = self.fulfill_obligation(trait_ref); + if let traits::VtableImpl(vtable_impl) = vtable { + let name = self.tcx.item_name(def_id); + let assoc_const_opt = self.tcx.associated_items(vtable_impl.impl_def_id) + .find(|item| item.kind == ty::AssociatedKind::Const && item.name == name); + if let Some(assoc_const) = assoc_const_opt { + return ty::Instance::new(assoc_const.def_id, vtable_impl.substs); + } + } + } + ty::Instance::new(def_id, substs) + } +} diff --git a/src/librustc_mir/miri/value.rs b/src/librustc_mir/miri/value.rs new file mode 100644 index 0000000000000..70c5d3c5140b3 --- /dev/null +++ b/src/librustc_mir/miri/value.rs @@ -0,0 +1,421 @@ +#![allow(unknown_lints)] +#![allow(float_cmp)] + +use rustc::ty::layout::TargetDataLayout; + +use super::error::{EvalError, EvalResult}; +use super::memory::{Memory, MemoryPointer, HasMemory}; + +pub(super) fn bytes_to_f32(bytes: u128) -> f32 { + f32::from_bits(bytes as u32) +} + +pub(super) fn bytes_to_f64(bytes: u128) -> f64 { + f64::from_bits(bytes as u64) +} + +pub(super) fn f32_to_bytes(f: f32) -> u128 { + f.to_bits() as u128 +} + +pub(super) fn f64_to_bytes(f: f64) -> u128 {
+ f.to_bits() as u128 +} + +/// A `Value` represents a single self-contained Rust value. +/// +/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitive +/// value held directly, outside of any allocation (`ByVal`). For `ByRef`-values, we remember +/// whether the pointer is supposed to be aligned or not (also see Lvalue). +/// +/// For optimization of a few very common cases, there is also a representation for a pair of +/// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary +/// operations and fat pointers. This idea was taken from rustc's trans. +#[derive(Clone, Copy, Debug)] +pub enum Value { + ByRef(Pointer, bool), + ByVal(PrimVal), + ByValPair(PrimVal, PrimVal), +} + +/// A wrapper type around `PrimVal` that cannot be turned back into a `PrimVal` accidentally. +/// This type clears up a few APIs where having a `PrimVal` argument for something that is +/// potentially an integer pointer or a pointer to an allocation was unclear. +/// +/// I (@oli-obk) believe it is less easy to mix up generic primvals and primvals that are just +/// the representation of pointers. Also all the sites that convert between primvals and pointers +/// are explicit now (and rare!) +#[derive(Clone, Copy, Debug)] +pub struct Pointer { + primval: PrimVal, +} + +impl<'tcx> Pointer { + pub fn null() -> Self { + PrimVal::Bytes(0).into() + } + pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + self.primval.to_ptr() + } + pub fn into_inner_primval(self) -> PrimVal { + self.primval + } + + pub(crate) fn signed_offset(self, i: i64, layout: &TargetDataLayout) -> EvalResult<'tcx, Self> { + match self.primval { + PrimVal::Bytes(b) => { + assert_eq!(b as u64 as u128, b); + Ok(Pointer::from(PrimVal::Bytes(signed_offset(b as u64, i, layout)? as u128))) + }, + PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(Pointer::from), + PrimVal::Undef => Err(EvalError::ReadUndefBytes), + } + } + + pub(crate) fn offset(self, i: u64, layout: &TargetDataLayout) -> EvalResult<'tcx, Self> { + match self.primval { + PrimVal::Bytes(b) => { + assert_eq!(b as u64 as u128, b); + Ok(Pointer::from(PrimVal::Bytes(offset(b as u64, i, layout)?
as u128))) + }, + PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from), + PrimVal::Undef => Err(EvalError::ReadUndefBytes), + } + } + + pub(crate) fn wrapping_signed_offset(self, i: i64, layout: &TargetDataLayout) -> EvalResult<'tcx, Self> { + match self.primval { + PrimVal::Bytes(b) => { + assert_eq!(b as u64 as u128, b); + Ok(Pointer::from(PrimVal::Bytes(wrapping_signed_offset(b as u64, i, layout) as u128))) + }, + PrimVal::Ptr(ptr) => Ok(Pointer::from(ptr.wrapping_signed_offset(i, layout))), + PrimVal::Undef => Err(EvalError::ReadUndefBytes), + } + } + + pub fn is_null(self) -> EvalResult<'tcx, bool> { + match self.primval { + PrimVal::Bytes(b) => Ok(b == 0), + PrimVal::Ptr(_) => Ok(false), + PrimVal::Undef => Err(EvalError::ReadUndefBytes), + } + } + + pub fn to_value_with_len(self, len: u64) -> Value { + Value::ByValPair(self.primval, PrimVal::from_u128(len as u128)) + } + + pub fn to_value_with_vtable(self, vtable: MemoryPointer) -> Value { + Value::ByValPair(self.primval, PrimVal::Ptr(vtable)) + } + + pub fn to_value(self) -> Value { + Value::ByVal(self.primval) + } +} + +impl ::std::convert::From<PrimVal> for Pointer { + fn from(primval: PrimVal) -> Self { + Pointer { primval } + } +} + +impl ::std::convert::From<MemoryPointer> for Pointer { + fn from(ptr: MemoryPointer) -> Self { + PrimVal::Ptr(ptr).into() + } +} + +/// A `PrimVal` represents an immediate, primitive value existing outside of a +/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 8 bytes in +/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes +/// of a simple value, a pointer into another `Allocation`, or be undefined. +#[derive(Clone, Copy, Debug)] +pub enum PrimVal { + /// The raw bytes of a simple value. + Bytes(u128), + + /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of + /// relocations, but a `PrimVal` is only large enough to contain one, so we just represent the + /// relocation and its associated offset together as a `MemoryPointer` here. + Ptr(MemoryPointer), + + /// An undefined `PrimVal`, for representing values that aren't safe to examine, but are safe + /// to copy around, just like undefined bytes in an `Allocation`. + Undef, +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum PrimValKind { + I8, I16, I32, I64, I128, + U8, U16, U32, U64, U128, + F32, F64, + Bool, + Char, + Ptr, + FnPtr, +} + +impl<'a, 'tcx: 'a> Value { + #[inline] + pub(super) fn by_ref(ptr: Pointer) -> Self { + Value::ByRef(ptr, true) + } + + /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef, + /// this may have to perform a load. + pub(super) fn into_ptr(&self, mem: &mut Memory<'a, 'tcx>) -> EvalResult<'tcx, Pointer> { + use self::Value::*; + match *self { + ByRef(ptr, aligned) => { + mem.read_maybe_aligned(aligned, |mem| mem.read_ptr(ptr.to_ptr()?)
) + }, + ByVal(ptr) | ByValPair(ptr, _) => Ok(ptr.into()), + } + } + + pub(super) fn into_ptr_vtable_pair( + &self, + mem: &mut Memory<'a, 'tcx> + ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> { + use self::Value::*; + match *self { + ByRef(ref_ptr, aligned) => { + mem.read_maybe_aligned(aligned, |mem| { + let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?; + let vtable = mem.read_ptr(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?; + Ok((ptr, vtable.to_ptr()?)) + }) + } + + ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)), + + _ => bug!("expected ptr and vtable, got {:?}", self), + } + } + + pub(super) fn into_slice(&self, mem: &mut Memory<'a, 'tcx>) -> EvalResult<'tcx, (Pointer, u64)> { + use self::Value::*; + match *self { + ByRef(ref_ptr, aligned) => { + mem.write_maybe_aligned(aligned, |mem| { + let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?; + let len = mem.read_usize(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?; + Ok((ptr, len)) + }) + }, + ByValPair(ptr, val) => { + let len = val.to_u128()?; + assert_eq!(len as u64 as u128, len); + Ok((ptr.into(), len as u64)) + }, + ByVal(_) => bug!("expected ptr and length, got {:?}", self), + } + } +} + +impl<'tcx> PrimVal { + pub fn from_u128(n: u128) -> Self { + PrimVal::Bytes(n) + } + + pub fn from_i128(n: i128) -> Self { + PrimVal::Bytes(n as u128) + } + + pub fn from_f32(f: f32) -> Self { + PrimVal::Bytes(f32_to_bytes(f)) + } + + pub fn from_f64(f: f64) -> Self { + PrimVal::Bytes(f64_to_bytes(f)) + } + + pub fn from_bool(b: bool) -> Self { + PrimVal::Bytes(b as u128) + } + + pub fn from_char(c: char) -> Self { + PrimVal::Bytes(c as u128) + } + + pub fn to_bytes(self) -> EvalResult<'tcx, u128> { + match self { + PrimVal::Bytes(b) => Ok(b), + PrimVal::Ptr(_) => Err(EvalError::ReadPointerAsBytes), + PrimVal::Undef => Err(EvalError::ReadUndefBytes), + } + } + + pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + match self { + PrimVal::Bytes(_) => Err(EvalError::ReadBytesAsPointer), + PrimVal::Ptr(p) => Ok(p), + PrimVal::Undef => Err(EvalError::ReadUndefBytes), + } + } + + pub fn is_bytes(self) -> bool { + match self { + PrimVal::Bytes(_) => true, + _ => false, + } + } + + pub fn is_ptr(self) -> bool { + match self { + PrimVal::Ptr(_) => true, + _ => false, + } + } + + pub fn is_undef(self) -> bool { + match self { + PrimVal::Undef => true, + _ => false, + } + } + + pub fn to_u128(self) -> EvalResult<'tcx, u128> { + self.to_bytes() + } + + pub fn to_u64(self) -> EvalResult<'tcx, u64> { + self.to_bytes().map(|b| { + assert_eq!(b as u64 as u128, b); + b as u64 + }) + } + + pub fn to_i32(self) -> EvalResult<'tcx, i32> { + self.to_bytes().map(|b| { + assert_eq!(b as i32 as u128, b); + b as i32 + }) + } + + pub fn to_i128(self) -> EvalResult<'tcx, i128> { + self.to_bytes().map(|b| b as i128) + } + + pub fn to_i64(self) -> EvalResult<'tcx, i64> { + self.to_bytes().map(|b| { + assert_eq!(b as i64 as u128, b); + b as i64 + }) + } + + pub fn to_f32(self) -> EvalResult<'tcx, f32> { + self.to_bytes().map(bytes_to_f32) + } + + pub fn to_f64(self) -> EvalResult<'tcx, f64> { + self.to_bytes().map(bytes_to_f64) + } + + pub fn to_bool(self) -> EvalResult<'tcx, bool> { + match self.to_bytes()? { + 0 => Ok(false), + 1 => Ok(true), + _ => Err(EvalError::InvalidBool), + } + } +} + +// Overflow checking only works properly on the range from -u64 to +u64. +pub fn overflowing_signed_offset<'tcx>(val: u64, i: i128, layout: &TargetDataLayout) -> (u64, bool) { + // FIXME: is it possible to over/underflow here? 
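+ // Worked example: val = 16, i = -8 with a 64-bit layout takes the branch below with n = 8, yielding (8, false).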
+ if i < 0 { + // trickery to ensure that i64::min_value() works fine + // this formula only works for true negative values, it panics for zero! + let n = u64::max_value() - (i as u64) + 1; + val.overflowing_sub(n) + } else { + overflowing_offset(val, i as u64, layout) + } +} + +pub fn overflowing_offset<'tcx>(val: u64, i: u64, layout: &TargetDataLayout) -> (u64, bool) { + let (res, over) = val.overflowing_add(i); + ((res as u128 % (1u128 << layout.pointer_size.bits())) as u64, + over || res as u128 >= (1u128 << layout.pointer_size.bits())) +} + +pub fn signed_offset<'tcx>(val: u64, i: i64, layout: &TargetDataLayout) -> EvalResult<'tcx, u64> { + let (res, over) = overflowing_signed_offset(val, i as i128, layout); + if over { + Err(EvalError::OverflowingMath) + } else { + Ok(res) + } +} + +pub fn offset<'tcx>(val: u64, i: u64, layout: &TargetDataLayout) -> EvalResult<'tcx, u64> { + let (res, over) = overflowing_offset(val, i, layout); + if over { + Err(EvalError::OverflowingMath) + } else { + Ok(res) + } +} + +pub fn wrapping_signed_offset<'tcx>(val: u64, i: i64, layout: &TargetDataLayout) -> u64 { + overflowing_signed_offset(val, i as i128, layout).0 +} + +impl PrimValKind { + pub fn is_int(self) -> bool { + use self::PrimValKind::*; + match self { + I8 | I16 | I32 | I64 | I128 | U8 | U16 | U32 | U64 | U128 => true, + _ => false, + } + } + + pub fn is_signed_int(self) -> bool { + use self::PrimValKind::*; + match self { + I8 | I16 | I32 | I64 | I128 => true, + _ => false, + } + } + + pub fn is_float(self) -> bool { + use self::PrimValKind::*; + match self { + F32 | F64 => true, + _ => false, + } + } + + pub fn from_uint_size(size: u64) -> Self { + match size { + 1 => PrimValKind::U8, + 2 => PrimValKind::U16, + 4 => PrimValKind::U32, + 8 => PrimValKind::U64, + 16 => PrimValKind::U128, + _ => bug!("can't make uint with size {}", size), + } + } + + pub fn from_int_size(size: u64) -> Self { + match size { + 1 => PrimValKind::I8, + 2 => PrimValKind::I16, + 4 => PrimValKind::I32, + 8 => PrimValKind::I64, + 16 => PrimValKind::I128, + _ => bug!("can't make int with size {}", size), + } + } + + pub fn is_ptr(self) -> bool { + use self::PrimValKind::*; + match self { + Ptr | FnPtr => true, + _ => false, + } + } +} diff --git a/tests/compile-fail-fullmir/undefined_byte_read.rs b/tests/compile-fail-fullmir/undefined_byte_read.rs new file mode 100644 index 0000000000000..f8b6f7f4aec13 --- /dev/null +++ b/tests/compile-fail-fullmir/undefined_byte_read.rs @@ -0,0 +1,6 @@ +fn main() { + let v: Vec<u8> = Vec::with_capacity(10); + let undef = unsafe { *v.get_unchecked(5) }; + let x = undef + 1; //~ ERROR: attempted to read undefined bytes + panic!("this should never print: {}", x); +} diff --git a/tests/compile-fail/alignment.rs b/tests/compile-fail/alignment.rs new file mode 100644 index 0000000000000..4faaa359df624 --- /dev/null +++ b/tests/compile-fail/alignment.rs @@ -0,0 +1,11 @@ +fn main() { + // miri always gives allocations the worst possible alignment, so a `u8` array is guaranteed + // to be at the virtual location 1 (so one byte offset from the ultimate alignment location 0) + let mut x = [0u8; 20]; + let x_ptr: *mut u8 = &mut x[0]; + let y_ptr = x_ptr as *mut u64; + unsafe { + *y_ptr = 42; //~ ERROR tried to access memory with alignment 1, but alignment + } + panic!("unreachable in miri"); +} diff --git a/tests/compile-fail/assume.rs b/tests/compile-fail/assume.rs new file mode 100644 index 0000000000000..69758a5d7fe8c --- /dev/null +++ b/tests/compile-fail/assume.rs @@ -0,0 +1,10 @@
+#![feature(core_intrinsics)] + +fn main() { + let x = 5; + unsafe { + std::intrinsics::assume(x < 10); + std::intrinsics::assume(x > 1); + std::intrinsics::assume(x > 42); //~ ERROR: `assume` argument was false + } +} diff --git a/tests/compile-fail/bitop-beyond-alignment.rs b/tests/compile-fail/bitop-beyond-alignment.rs new file mode 100644 index 0000000000000..a30c054ab5d04 --- /dev/null +++ b/tests/compile-fail/bitop-beyond-alignment.rs @@ -0,0 +1,37 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(dead_code)] + +use std::mem; + +enum Tag<A> { + Tag2(A) +} + +struct Rec { + c8: u8, + t: Tag<u64> +} + +fn mk_rec() -> Rec { + return Rec { c8:0, t:Tag::Tag2(0) }; +} + +fn is_u64_aligned(u: &Tag<u64>) -> bool { + let p: usize = unsafe { mem::transmute(u) }; + let u64_align = std::mem::align_of::<u64>(); + return (p & (u64_align + 1)) == 0; //~ ERROR a raw memory access tried to access part of a pointer value as raw bytes +} + +pub fn main() { + let x = mk_rec(); + assert!(is_u64_aligned(&x.t)); +} diff --git a/tests/compile-fail/cast_box_int_to_fn_ptr.rs b/tests/compile-fail/cast_box_int_to_fn_ptr.rs new file mode 100644 index 0000000000000..96469814be29b --- /dev/null +++ b/tests/compile-fail/cast_box_int_to_fn_ptr.rs @@ -0,0 +1,8 @@ +fn main() { + let b = Box::new(42); + let g = unsafe { + std::mem::transmute::<&usize, &fn(i32)>(&b) + }; + + (*g)(42) //~ ERROR a memory access tried to interpret some bytes as a pointer +} diff --git a/tests/compile-fail/cast_fn_ptr.rs b/tests/compile-fail/cast_fn_ptr.rs new file mode 100644 index 0000000000000..7509ae6ed77cb --- /dev/null +++ b/tests/compile-fail/cast_fn_ptr.rs @@ -0,0 +1,9 @@ +fn main() { + fn f() {} + + let g = unsafe { + std::mem::transmute::<fn(), fn(i32)>(f) + }; + + g(42) //~ ERROR tried to call a function with sig fn() through a function pointer of type fn(i32) +} diff --git a/tests/compile-fail/cast_fn_ptr2.rs b/tests/compile-fail/cast_fn_ptr2.rs new file mode 100644 index 0000000000000..5d902e1f9aaaf --- /dev/null +++ b/tests/compile-fail/cast_fn_ptr2.rs @@ -0,0 +1,9 @@ +fn main() { + fn f(_ : (i32,i32)) {} + + let g = unsafe { + std::mem::transmute::<fn((i32,i32)), fn(i32)>(f) + }; + + g(42) //~ ERROR tried to call a function with sig fn((i32, i32)) through a function pointer of type fn(i32) +} diff --git a/tests/compile-fail/cast_fn_ptr_unsafe.rs b/tests/compile-fail/cast_fn_ptr_unsafe.rs new file mode 100644 index 0000000000000..568681da3c5d3 --- /dev/null +++ b/tests/compile-fail/cast_fn_ptr_unsafe.rs @@ -0,0 +1,10 @@ +// just making sure that fn -> unsafe fn casts are handled by rustc so miri doesn't have to +fn main() { + fn f() {} + + let g = f as fn() as unsafe fn(i32); //~ERROR: non-primitive cast: `fn()` as `unsafe fn(i32)` + + unsafe { + g(42); + } +} diff --git a/tests/compile-fail/cast_fn_ptr_unsafe2.rs b/tests/compile-fail/cast_fn_ptr_unsafe2.rs new file mode 100644 index 0000000000000..314365939fe80 --- /dev/null +++ b/tests/compile-fail/cast_fn_ptr_unsafe2.rs @@ -0,0 +1,10 @@ +// just making sure that fn -> unsafe fn casts are handled by rustc so miri doesn't have to +fn main() { + fn f() {} + + let g = f as fn() as fn(i32) as unsafe fn(i32); //~ERROR: non-primitive cast: `fn()` as `fn(i32)` + + unsafe { + g(42); + } +} diff --git
a/tests/compile-fail/cast_int_to_fn_ptr.rs b/tests/compile-fail/cast_int_to_fn_ptr.rs new file mode 100644 index 0000000000000..28d56a2cb6271 --- /dev/null +++ b/tests/compile-fail/cast_int_to_fn_ptr.rs @@ -0,0 +1,7 @@ +fn main() { + let g = unsafe { + std::mem::transmute::<usize, fn(i32)>(42) + }; + + g(42) //~ ERROR a memory access tried to interpret some bytes as a pointer +} diff --git a/tests/compile-fail/copy_nonoverlapping.rs b/tests/compile-fail/copy_nonoverlapping.rs new file mode 100644 index 0000000000000..f4acbadfd549d --- /dev/null +++ b/tests/compile-fail/copy_nonoverlapping.rs @@ -0,0 +1,24 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(core_intrinsics)] + +use std::intrinsics::*; + +//error-pattern: copy_nonoverlapping called on overlapping ranges + +fn main() { + let mut data = [0u8; 16]; + unsafe { + let a = &data[0] as *const _; + let b = &mut data[1] as *mut _; + std::ptr::copy_nonoverlapping(a, b, 2); + } +} diff --git a/tests/compile-fail/ctlz_nonzero.rs b/tests/compile-fail/ctlz_nonzero.rs new file mode 100644 index 0000000000000..704c4d4b7d462 --- /dev/null +++ b/tests/compile-fail/ctlz_nonzero.rs @@ -0,0 +1,15 @@ +#![feature(intrinsics)] + +mod rusti { + extern "rust-intrinsic" { + pub fn ctlz_nonzero<T>(x: T) -> T; + } +} + +pub fn main() { + unsafe { + use rusti::*; + + ctlz_nonzero(0u8); //~ ERROR: ctlz_nonzero called on 0 + } +} diff --git a/tests/compile-fail/cttz_nonzero.rs b/tests/compile-fail/cttz_nonzero.rs new file mode 100644 index 0000000000000..eda25c6615214 --- /dev/null +++ b/tests/compile-fail/cttz_nonzero.rs @@ -0,0 +1,15 @@ +#![feature(intrinsics)] + +mod rusti { + extern "rust-intrinsic" { + pub fn cttz_nonzero<T>(x: T) -> T; + } +} + +pub fn main() { + unsafe { + use rusti::*; + + cttz_nonzero(0u8); //~ ERROR: cttz_nonzero called on 0 + } +} diff --git a/tests/compile-fail/dangling_pointer_deref.rs b/tests/compile-fail/dangling_pointer_deref.rs new file mode 100644 index 0000000000000..0ede7c96f0047 --- /dev/null +++ b/tests/compile-fail/dangling_pointer_deref.rs @@ -0,0 +1,8 @@ +fn main() { + let p = { + let b = Box::new(42); + &*b as *const i32 + }; + let x = unsafe { *p }; //~ ERROR: dangling pointer was dereferenced + panic!("this should never print: {}", x); +} diff --git a/tests/compile-fail/deallocate-bad-alignment.rs b/tests/compile-fail/deallocate-bad-alignment.rs new file mode 100644 index 0000000000000..a0bcffa47d9fc --- /dev/null +++ b/tests/compile-fail/deallocate-bad-alignment.rs @@ -0,0 +1,16 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate or reallocate using incorrect alignment or size + +use alloc::heap::*; +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + Heap.dealloc(x, Layout::from_size_align_unchecked(1, 2)); + } +} diff --git a/tests/compile-fail/deallocate-bad-size.rs b/tests/compile-fail/deallocate-bad-size.rs new file mode 100644 index 0000000000000..d8c4493043dbb --- /dev/null +++ b/tests/compile-fail/deallocate-bad-size.rs @@ -0,0 +1,16 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern:
tried to deallocate or reallocate using incorrect alignment or size + +use alloc::heap::*; +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + Heap.dealloc(x, Layout::from_size_align_unchecked(2, 1)); + } +} diff --git a/tests/compile-fail/deallocate-twice.rs b/tests/compile-fail/deallocate-twice.rs new file mode 100644 index 0000000000000..fd3cccfd53a91 --- /dev/null +++ b/tests/compile-fail/deallocate-twice.rs @@ -0,0 +1,17 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate dangling pointer + +use alloc::heap::*; +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + Heap.dealloc(x, Layout::from_size_align_unchecked(1, 1)); + Heap.dealloc(x, Layout::from_size_align_unchecked(1, 1)); + } +} diff --git a/tests/compile-fail/deref_fn_ptr.rs b/tests/compile-fail/deref_fn_ptr.rs new file mode 100644 index 0000000000000..c1eaf7eaa61d2 --- /dev/null +++ b/tests/compile-fail/deref_fn_ptr.rs @@ -0,0 +1,8 @@ +fn f() {} + +fn main() { + let x: i32 = unsafe { + *std::mem::transmute::<fn(), *const i32>(f) //~ ERROR: tried to dereference a function pointer + }; + panic!("this should never print: {}", x); +} diff --git a/tests/compile-fail/div-by-zero-2.rs b/tests/compile-fail/div-by-zero-2.rs new file mode 100644 index 0000000000000..3e869ad4a5078 --- /dev/null +++ b/tests/compile-fail/div-by-zero-2.rs @@ -0,0 +1,15 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(const_err)] + +fn main() { + let _n = 1 / 0; //~ ERROR: DivisionByZero +} diff --git a/tests/compile-fail/div-by-zero.rs b/tests/compile-fail/div-by-zero.rs new file mode 100644 index 0000000000000..4ac6214d88abb --- /dev/null +++ b/tests/compile-fail/div-by-zero.rs @@ -0,0 +1,21 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
+ +#![feature(core_intrinsics)] + +use std::intrinsics::*; + +//error-pattern: Division by 0 in unchecked_div + +fn main() { + unsafe { + let _n = unchecked_div(1i64, 0); + } +} diff --git a/tests/compile-fail/execute_memory.rs b/tests/compile-fail/execute_memory.rs new file mode 100644 index 0000000000000..8d3c9df0320ba --- /dev/null +++ b/tests/compile-fail/execute_memory.rs @@ -0,0 +1,9 @@ +#![feature(box_syntax)] + +fn main() { + let x = box 42; + unsafe { + let f = std::mem::transmute::<Box<i32>, fn()>(x); + f() //~ ERROR: tried to treat a memory pointer as a function pointer + } +} diff --git a/tests/compile-fail/fn_ptr_offset.rs b/tests/compile-fail/fn_ptr_offset.rs new file mode 100644 index 0000000000000..2d240b6a55ade --- /dev/null +++ b/tests/compile-fail/fn_ptr_offset.rs @@ -0,0 +1,11 @@ +use std::mem; + +fn f() {} + +fn main() { + let x : fn() = f; + let y : *mut u8 = unsafe { mem::transmute(x) }; + let y = y.wrapping_offset(1); + let x : fn() = unsafe { mem::transmute(y) }; + x(); //~ ERROR: tried to use an integer pointer or a dangling pointer as a function pointer +} diff --git a/tests/compile-fail/int_ptr_cast.rs b/tests/compile-fail/int_ptr_cast.rs new file mode 100644 index 0000000000000..ae5f65a7166c6 --- /dev/null +++ b/tests/compile-fail/int_ptr_cast.rs @@ -0,0 +1,5 @@ +fn main() { + let x = 2usize as *const u32; + // This must fail because alignment is violated + let _ = unsafe { &*x }; //~ ERROR: tried to access memory with alignment 2, but alignment 4 is required +} diff --git a/tests/compile-fail/int_ptr_cast2.rs b/tests/compile-fail/int_ptr_cast2.rs new file mode 100644 index 0000000000000..1897066f7bcc3 --- /dev/null +++ b/tests/compile-fail/int_ptr_cast2.rs @@ -0,0 +1,5 @@ +fn main() { + let x = 0usize as *const u32; + // This must fail because the pointer is NULL + let _ = unsafe { &*x }; //~ ERROR: invalid use of NULL pointer +} diff --git a/tests/compile-fail/invalid_bool.rs b/tests/compile-fail/invalid_bool.rs new file mode 100644 index 0000000000000..9de2630797ece --- /dev/null +++ b/tests/compile-fail/invalid_bool.rs @@ -0,0 +1,4 @@ +fn main() { + let b = unsafe { std::mem::transmute::<u8, bool>(2) }; + if b { unreachable!() } else { unreachable!() } //~ ERROR: invalid boolean value read +} diff --git a/tests/compile-fail/invalid_enum_discriminant.rs b/tests/compile-fail/invalid_enum_discriminant.rs new file mode 100644 index 0000000000000..bde78200b3c47 --- /dev/null +++ b/tests/compile-fail/invalid_enum_discriminant.rs @@ -0,0 +1,14 @@ +#[repr(C)] +pub enum Foo { + A, B, C, D +} + +fn main() { + let f = unsafe { std::mem::transmute::<i32, Foo>(42) }; + match f { + Foo::A => {}, //~ ERROR invalid enum discriminant value read + Foo::B => {}, + Foo::C => {}, + Foo::D => {}, + } +} diff --git a/tests/compile-fail/match_char.rs b/tests/compile-fail/match_char.rs new file mode 100644 index 0000000000000..a91c7fef6aa1e --- /dev/null +++ b/tests/compile-fail/match_char.rs @@ -0,0 +1,8 @@ +fn main() { + assert!(std::char::from_u32(-1_i32 as u32).is_none()); + match unsafe { std::mem::transmute::<i32, char>(-1) } { + 'a' => {}, //~ERROR tried to interpret an invalid 32-bit value as a char: 4294967295 + 'b' => {}, + _ => {}, + } +} diff --git a/tests/compile-fail/memleak.rs b/tests/compile-fail/memleak.rs new file mode 100644 index 0000000000000..71b4e2f442f31 --- /dev/null +++ b/tests/compile-fail/memleak.rs @@ -0,0 +1,5 @@ +//error-pattern: the evaluated program leaked memory + +fn main() { + std::mem::forget(Box::new(42)); +} diff --git a/tests/compile-fail/memleak_rc.rs
b/tests/compile-fail/memleak_rc.rs new file mode 100644 index 0000000000000..b2bc6722afb04 --- /dev/null +++ b/tests/compile-fail/memleak_rc.rs @@ -0,0 +1,12 @@ +//error-pattern: the evaluated program leaked memory + +use std::rc::Rc; +use std::cell::RefCell; + +struct Dummy(Rc<RefCell<Option<Dummy>>>); + +fn main() { + let x = Dummy(Rc::new(RefCell::new(None))); + let y = Dummy(x.0.clone()); + *x.0.borrow_mut() = Some(y); +} diff --git a/tests/compile-fail/modifying_constants.rs b/tests/compile-fail/modifying_constants.rs new file mode 100644 index 0000000000000..cb2e7217d5797 --- /dev/null +++ b/tests/compile-fail/modifying_constants.rs @@ -0,0 +1,6 @@ +fn main() { + let x = &1; // the `&1` is promoted to a constant, but it used to be that only the pointer is marked static, not the pointee + let y = unsafe { &mut *(x as *const i32 as *mut i32) }; + *y = 42; //~ ERROR tried to modify constant memory + assert_eq!(*x, 42); +} diff --git a/tests/compile-fail/never_say_never.rs b/tests/compile-fail/never_say_never.rs new file mode 100644 index 0000000000000..5d7e9fec62c24 --- /dev/null +++ b/tests/compile-fail/never_say_never.rs @@ -0,0 +1,12 @@ +#![feature(never_type)] +#![allow(unreachable_code)] + +fn main() { + let y = &5; + let x: ! = unsafe { + *(y as *const _ as *const !) //~ ERROR entered unreachable code + }; + f(x) +} + +fn f(x: !) -> ! { x } diff --git a/tests/compile-fail/never_transmute_humans.rs b/tests/compile-fail/never_transmute_humans.rs new file mode 100644 index 0000000000000..38406eeb3fea6 --- /dev/null +++ b/tests/compile-fail/never_transmute_humans.rs @@ -0,0 +1,14 @@ +#![feature(never_type)] +#![allow(unreachable_code)] +#![allow(unused_variables)] + +struct Human; + +fn main() { + let x: ! = unsafe { + std::mem::transmute::<Human, !>(Human) //~ ERROR entered unreachable code + }; + f(x) +} + +fn f(x: !) -> ! { x } diff --git a/tests/compile-fail/never_transmute_void.rs b/tests/compile-fail/never_transmute_void.rs new file mode 100644 index 0000000000000..3fffacc55ea47 --- /dev/null +++ b/tests/compile-fail/never_transmute_void.rs @@ -0,0 +1,16 @@ +#![feature(never_type)] +#![allow(unreachable_code)] +#![allow(unused_variables)] + +enum Void {} + +fn f(v: Void) -> !
{ + match v {} +} + +fn main() { + let v: Void = unsafe { + std::mem::transmute::<(), Void>(()) //~ ERROR entered unreachable code + }; + f(v); +} diff --git a/tests/compile-fail/null_pointer_deref.rs b/tests/compile-fail/null_pointer_deref.rs new file mode 100644 index 0000000000000..20b93aab1607d --- /dev/null +++ b/tests/compile-fail/null_pointer_deref.rs @@ -0,0 +1,4 @@ +fn main() { + let x: i32 = unsafe { *std::ptr::null() }; //~ ERROR: a memory access tried to interpret some bytes as a pointer + panic!("this should never print: {}", x); +} diff --git a/tests/compile-fail/oom.rs b/tests/compile-fail/oom.rs new file mode 100644 index 0000000000000..d4aebb912ee17 --- /dev/null +++ b/tests/compile-fail/oom.rs @@ -0,0 +1,7 @@ +#![feature(custom_attribute, attr_literals)] +#![miri(memory_size=4095)] + +fn main() { + let _x = [42; 1024]; + //~^ERROR tried to allocate 4096 more bytes, but only +} diff --git a/tests/compile-fail/oom2.rs b/tests/compile-fail/oom2.rs new file mode 100644 index 0000000000000..1a4a47efe6858 --- /dev/null +++ b/tests/compile-fail/oom2.rs @@ -0,0 +1,8 @@ +#![feature(box_syntax, custom_attribute, attr_literals)] +#![miri(memory_size=2048)] + +fn main() { + loop { + ::std::mem::forget(box 42); //~ ERROR tried to allocate 4 more bytes + } +} diff --git a/tests/compile-fail/out_of_bounds_ptr_1.rs b/tests/compile-fail/out_of_bounds_ptr_1.rs new file mode 100644 index 0000000000000..8dce7e5786264 --- /dev/null +++ b/tests/compile-fail/out_of_bounds_ptr_1.rs @@ -0,0 +1,8 @@ +// error-pattern: pointer computed at offset 5, outside bounds of allocation +fn main() { + let v = [0i8; 4]; + let x = &v as *const i8; + // The error is inside another function, so we cannot match it by line + let x = unsafe { x.offset(5) }; + panic!("this should never print: {:?}", x); +} diff --git a/tests/compile-fail/out_of_bounds_ptr_2.rs b/tests/compile-fail/out_of_bounds_ptr_2.rs new file mode 100644 index 0000000000000..f7546494574b0 --- /dev/null +++ b/tests/compile-fail/out_of_bounds_ptr_2.rs @@ -0,0 +1,7 @@ +// error-pattern: overflowing math +fn main() { + let v = [0i8; 4]; + let x = &v as *const i8; + let x = unsafe { x.offset(-1) }; + panic!("this should never print: {:?}", x); +} diff --git a/tests/compile-fail/out_of_bounds_read.rs b/tests/compile-fail/out_of_bounds_read.rs new file mode 100644 index 0000000000000..8c56b14bdf221 --- /dev/null +++ b/tests/compile-fail/out_of_bounds_read.rs @@ -0,0 +1,5 @@ +fn main() { + let v: Vec<u8> = vec![1, 2]; + let x = unsafe { *v.as_ptr().wrapping_offset(5) }; //~ ERROR: which has size 2 + panic!("this should never print: {}", x); +} diff --git a/tests/compile-fail/out_of_bounds_read2.rs b/tests/compile-fail/out_of_bounds_read2.rs new file mode 100644 index 0000000000000..d29b22ffb2a6b --- /dev/null +++ b/tests/compile-fail/out_of_bounds_read2.rs @@ -0,0 +1,5 @@ +fn main() { + let v: Vec<u8> = vec![1, 2]; + let x = unsafe { *v.as_ptr().wrapping_offset(5) }; //~ ERROR: memory access at offset 6, outside bounds of allocation + panic!("this should never print: {}", x); +} diff --git a/tests/compile-fail/overflowing-lsh-neg.rs b/tests/compile-fail/overflowing-lsh-neg.rs new file mode 100644 index 0000000000000..3a889be741efd --- /dev/null +++ b/tests/compile-fail/overflowing-lsh-neg.rs @@ -0,0 +1,16 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT.
+// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(exceeding_bitshifts)] +#![allow(const_err)] + +fn main() { + let _n = 2i64 << -1; //~ Overflow(Shl) +} diff --git a/tests/compile-fail/overflowing-rsh-2.rs b/tests/compile-fail/overflowing-rsh-2.rs new file mode 100644 index 0000000000000..ac09a1740c43e --- /dev/null +++ b/tests/compile-fail/overflowing-rsh-2.rs @@ -0,0 +1,16 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(exceeding_bitshifts, const_err)] + +fn main() { + // Make sure we catch overflows that would be hidden by first casting the RHS to u32 + let _n = 1i64 >> (u32::max_value() as i64 + 1); //~ Overflow(Shr) +} diff --git a/tests/compile-fail/overflowing-rsh.rs b/tests/compile-fail/overflowing-rsh.rs new file mode 100644 index 0000000000000..a7ac9d1d50398 --- /dev/null +++ b/tests/compile-fail/overflowing-rsh.rs @@ -0,0 +1,15 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(exceeding_bitshifts)] + +fn main() { + let _n = 1i64 >> 64; //~ Overflow(Shr) +} diff --git a/tests/compile-fail/overflowing-unchecked-rsh.rs b/tests/compile-fail/overflowing-unchecked-rsh.rs new file mode 100644 index 0000000000000..b8291e1300edf --- /dev/null +++ b/tests/compile-fail/overflowing-unchecked-rsh.rs @@ -0,0 +1,21 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
+ +#![feature(core_intrinsics)] + +use std::intrinsics::*; + +//error-pattern: Overflowing shift by 64 in unchecked_shr + +fn main() { + unsafe { + let _n = unchecked_shr(1i64, 64); + } +} diff --git a/tests/compile-fail/overwriting_part_of_relocation_makes_the_rest_undefined.rs b/tests/compile-fail/overwriting_part_of_relocation_makes_the_rest_undefined.rs new file mode 100644 index 0000000000000..50f51d0ba9cad --- /dev/null +++ b/tests/compile-fail/overwriting_part_of_relocation_makes_the_rest_undefined.rs @@ -0,0 +1,11 @@ +fn main() { + let mut p = &42; + unsafe { + let ptr: *mut _ = &mut p; + *(ptr as *mut u8) = 123; // if we ever support 8 bit pointers, this is gonna cause + // "attempted to interpret some raw bytes as a pointer address" instead of + // "attempted to read undefined bytes" + } + let x = *p; //~ ERROR: attempted to read undefined bytes + panic!("this should never print: {}", x); +} diff --git a/tests/compile-fail/panic.rs b/tests/compile-fail/panic.rs new file mode 100644 index 0000000000000..0d594f9bd4c3b --- /dev/null +++ b/tests/compile-fail/panic.rs @@ -0,0 +1,5 @@ +//error-pattern: the evaluated program panicked + +fn main() { + assert_eq!(5, 6); +} diff --git a/tests/compile-fail/pointer_byte_read_1.rs b/tests/compile-fail/pointer_byte_read_1.rs new file mode 100644 index 0000000000000..342eb28a970fc --- /dev/null +++ b/tests/compile-fail/pointer_byte_read_1.rs @@ -0,0 +1,7 @@ +fn main() { + let x = 13; + let y = &x; + let z = &y as *const &i32 as *const usize; + let ptr_bytes = unsafe { *z }; // the actual deref is fine, because we read the entire pointer at once + let _ = ptr_bytes % 432; //~ ERROR: tried to access part of a pointer value as raw bytes +} diff --git a/tests/compile-fail/pointer_byte_read_2.rs b/tests/compile-fail/pointer_byte_read_2.rs new file mode 100644 index 0000000000000..b0f619332e00c --- /dev/null +++ b/tests/compile-fail/pointer_byte_read_2.rs @@ -0,0 +1,7 @@ +fn main() { + let x = 13; + let y = &x; + let z = &y as *const &i32 as *const u8; + // the deref fails, because we are reading only a part of the pointer + let _ = unsafe { *z }; //~ ERROR: tried to access part of a pointer value as raw bytes +} diff --git a/tests/compile-fail/pointers_to_different_allocations_are_unorderable.rs b/tests/compile-fail/pointers_to_different_allocations_are_unorderable.rs new file mode 100644 index 0000000000000..245b7527c55b2 --- /dev/null +++ b/tests/compile-fail/pointers_to_different_allocations_are_unorderable.rs @@ -0,0 +1,7 @@ +fn main() { + let x: *const u8 = &1; + let y: *const u8 = &2; + if x < y { //~ ERROR: attempted to do invalid arithmetic on pointers + unreachable!() + } +} diff --git a/tests/compile-fail/ptr_bitops.rs b/tests/compile-fail/ptr_bitops.rs new file mode 100644 index 0000000000000..78fd8e912b5e7 --- /dev/null +++ b/tests/compile-fail/ptr_bitops.rs @@ -0,0 +1,7 @@ +fn main() { + let bytes = [0i8, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let one = bytes.as_ptr().wrapping_offset(1); + let three = bytes.as_ptr().wrapping_offset(3); + let res = (one as usize) | (three as usize); //~ ERROR a raw memory access tried to access part of a pointer value as raw bytes + println!("{}", res); +} diff --git a/tests/compile-fail/ptr_int_cast.rs b/tests/compile-fail/ptr_int_cast.rs new file mode 100644 index 0000000000000..396c71ebb03d1 --- /dev/null +++ b/tests/compile-fail/ptr_int_cast.rs @@ -0,0 +1,8 @@ +fn main() { + let x = &1; + // Casting down to u8 and back up to a pointer loses too much precision; this must not work. 
+ let x = x as *const i32; + let x = x as u8; //~ ERROR: a raw memory access tried to access part of a pointer value as raw bytes + let x = x as *const i32; + let _ = unsafe { *x }; +} diff --git a/tests/compile-fail/ptr_offset_overflow.rs b/tests/compile-fail/ptr_offset_overflow.rs new file mode 100644 index 0000000000000..578468c3399bb --- /dev/null +++ b/tests/compile-fail/ptr_offset_overflow.rs @@ -0,0 +1,6 @@ +//error-pattern: overflowing math +fn main() { + let v = [1i8, 2]; + let x = &v[1] as *const i8; + let _ = unsafe { x.offset(isize::min_value()) }; +} diff --git a/tests/compile-fail/reading_half_a_pointer.rs b/tests/compile-fail/reading_half_a_pointer.rs new file mode 100644 index 0000000000000..cc41b52f33372 --- /dev/null +++ b/tests/compile-fail/reading_half_a_pointer.rs @@ -0,0 +1,29 @@ +#![allow(dead_code)] + +// We use packed structs to get around alignment restrictions +#[repr(packed)] +struct Data { + pad: u8, + ptr: &'static i32, +} + +// But we need to guarantee some alignment +struct Wrapper { + align: u64, + data: Data, +} + +static G : i32 = 0; + +fn main() { + let mut w = Wrapper { align: 0, data: Data { pad: 0, ptr: &G } }; + + // Get a pointer to the beginning of the Data struct (one u8 byte, then the pointer bytes). + // Thanks to the wrapper, we know this is aligned-enough to perform a load at ptr size. + // We load at pointer type, so having a relocation is okay -- but here, the relocation + // starts 1 byte to the right, so using it would actually be wrong! + let d_alias = &mut w.data as *mut _ as *mut *const u8; + unsafe { + let _x = *d_alias; //~ ERROR: tried to access part of a pointer value as raw bytes + } +} diff --git a/tests/compile-fail/reallocate-bad-alignment-2.rs b/tests/compile-fail/reallocate-bad-alignment-2.rs new file mode 100644 index 0000000000000..41da885a2c65a --- /dev/null +++ b/tests/compile-fail/reallocate-bad-alignment-2.rs @@ -0,0 +1,17 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate or reallocate using incorrect alignment or size + +use alloc::heap::*; +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + // Try realloc with a too big alignment. + let _y = Heap.realloc(x, Layout::from_size_align_unchecked(1, 2), Layout::from_size_align_unchecked(1, 1)).unwrap(); + } +} diff --git a/tests/compile-fail/reallocate-bad-alignment.rs b/tests/compile-fail/reallocate-bad-alignment.rs new file mode 100644 index 0000000000000..be4bc5589c5c6 --- /dev/null +++ b/tests/compile-fail/reallocate-bad-alignment.rs @@ -0,0 +1,17 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate or reallocate using incorrect alignment or size + +use alloc::heap::*; +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 2)).unwrap(); + // Try realloc with a too small alignment.
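+ // (the block was allocated with align 2, but the old layout handed to realloc below claims align 1, which must be rejected)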
+ let _y = Heap.realloc(x, Layout::from_size_align_unchecked(1, 1), Layout::from_size_align_unchecked(1, 2)).unwrap(); + } +} diff --git a/tests/compile-fail/reallocate-bad-size.rs b/tests/compile-fail/reallocate-bad-size.rs new file mode 100644 index 0000000000000..2e5a641838020 --- /dev/null +++ b/tests/compile-fail/reallocate-bad-size.rs @@ -0,0 +1,16 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: tried to deallocate or reallocate using incorrect alignment or size + +use alloc::heap::*; +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + let _y = Heap.realloc(x, Layout::from_size_align_unchecked(2, 1), Layout::from_size_align_unchecked(1, 1)).unwrap(); + } +} diff --git a/tests/compile-fail/reallocate-change-alloc.rs b/tests/compile-fail/reallocate-change-alloc.rs new file mode 100644 index 0000000000000..290c966a2bc8a --- /dev/null +++ b/tests/compile-fail/reallocate-change-alloc.rs @@ -0,0 +1,14 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + let _y = Heap.realloc(x, Layout::from_size_align_unchecked(1, 1), Layout::from_size_align_unchecked(1, 1)).unwrap(); + let _z = *x; //~ ERROR: dangling pointer was dereferenced + } +} diff --git a/tests/compile-fail/reallocate-dangling.rs b/tests/compile-fail/reallocate-dangling.rs new file mode 100644 index 0000000000000..54636b5d2005c --- /dev/null +++ b/tests/compile-fail/reallocate-dangling.rs @@ -0,0 +1,17 @@ +#![feature(alloc, allocator_api)] + +extern crate alloc; + +use alloc::heap::Heap; +use alloc::allocator::*; + +// error-pattern: dangling pointer was dereferenced + +use alloc::heap::*; +fn main() { + unsafe { + let x = Heap.alloc(Layout::from_size_align_unchecked(1, 1)).unwrap(); + Heap.dealloc(x, Layout::from_size_align_unchecked(1, 1)); + Heap.realloc(x, Layout::from_size_align_unchecked(1, 1), Layout::from_size_align_unchecked(1, 1)); + } +} diff --git a/tests/compile-fail/reference_to_packed.rs b/tests/compile-fail/reference_to_packed.rs new file mode 100644 index 0000000000000..4cf353298b9e8 --- /dev/null +++ b/tests/compile-fail/reference_to_packed.rs @@ -0,0 +1,16 @@ +#![allow(dead_code, unused_variables)] + +#[repr(packed)] +struct Foo { + x: i32, + y: i32, +} + +fn main() { + let foo = Foo { + x: 42, + y: 99, + }; + let p = &foo.x; //~ ERROR tried to access memory with alignment 1, but alignment 4 is required + let i = *p; +} diff --git a/tests/compile-fail/repeat.rs b/tests/compile-fail/repeat.rs new file mode 100644 index 0000000000000..abe89e233e7cd --- /dev/null +++ b/tests/compile-fail/repeat.rs @@ -0,0 +1,5 @@ +fn main() { + let data: [u8; std::usize::MAX] = [42; std::usize::MAX]; + //~^ ERROR: rustc layout computation failed: SizeOverflow([u8; + assert_eq!(data.len(), 1024); +} diff --git a/tests/compile-fail/repeat2.rs b/tests/compile-fail/repeat2.rs new file mode 100644 index 0000000000000..d489342b8599c --- /dev/null +++ b/tests/compile-fail/repeat2.rs @@ -0,0 +1,5 @@ +fn main() { + let data: [u8; 1024*1024*1024] = [42; 1024*1024*1024]; + //~^ ERROR: reached the configured maximum execution time + assert_eq!(data.len(), 1024*1024*1024); +} diff --git a/tests/compile-fail/stack_free.rs b/tests/compile-fail/stack_free.rs new file mode 100644 index 0000000000000..08ff7457b76be --- /dev/null +++ 
b/tests/compile-fail/stack_free.rs @@ -0,0 +1,7 @@ +// error-pattern: tried to deallocate Stack memory but gave Rust as the kind + +fn main() { + let x = 42; + let bad_box = unsafe { std::mem::transmute::<&i32, Box<i32>>(&x) }; + drop(bad_box); +} diff --git a/tests/compile-fail/stack_limit.rs b/tests/compile-fail/stack_limit.rs new file mode 100644 index 0000000000000..c6aaf80e6ac00 --- /dev/null +++ b/tests/compile-fail/stack_limit.rs @@ -0,0 +1,20 @@ +#![feature(custom_attribute, attr_literals)] +#![miri(stack_limit=16)] + +//error-pattern: reached the configured maximum number of stack frames + +fn bar() { + foo(); +} + +fn foo() { + cake(); +} + +fn cake() { + bar(); +} + +fn main() { + bar(); +} diff --git a/tests/compile-fail/static_memory_modification.rs b/tests/compile-fail/static_memory_modification.rs new file mode 100644 index 0000000000000..11961becb246a --- /dev/null +++ b/tests/compile-fail/static_memory_modification.rs @@ -0,0 +1,9 @@ +static X: usize = 5; + +#[allow(mutable_transmutes)] +fn main() { + unsafe { + *std::mem::transmute::<&usize, &mut usize>(&X) = 6; //~ ERROR: tried to modify constant memory + assert_eq!(X, 6); + } +} diff --git a/tests/compile-fail/static_memory_modification2.rs b/tests/compile-fail/static_memory_modification2.rs new file mode 100644 index 0000000000000..89d69cf7d7f4b --- /dev/null +++ b/tests/compile-fail/static_memory_modification2.rs @@ -0,0 +1,9 @@ +use std::mem::transmute; + +#[allow(mutable_transmutes)] +fn main() { + unsafe { + let s = "this is a test"; + transmute::<&[u8], &mut [u8]>(s.as_bytes())[4] = 42; //~ ERROR: tried to modify constant memory + } +} diff --git a/tests/compile-fail/static_memory_modification3.rs b/tests/compile-fail/static_memory_modification3.rs new file mode 100644 index 0000000000000..743fbe60efff6 --- /dev/null +++ b/tests/compile-fail/static_memory_modification3.rs @@ -0,0 +1,9 @@ +use std::mem::transmute; + +#[allow(mutable_transmutes)] +fn main() { + unsafe { + let bs = b"this is a test"; + transmute::<&[u8], &mut [u8]>(bs)[4] = 42; //~ ERROR: tried to modify constant memory + } +} diff --git a/tests/compile-fail/timeout.rs b/tests/compile-fail/timeout.rs new file mode 100644 index 0000000000000..edd4c31866910 --- /dev/null +++ b/tests/compile-fail/timeout.rs @@ -0,0 +1,9 @@ +//error-pattern: reached the configured maximum execution time +#![feature(custom_attribute, attr_literals)] +#![miri(step_limit=1000)] + +fn main() { + for i in 0..1000000 { + assert!(i < 1000); + } +} diff --git a/tests/compile-fail/transmute-pair-undef.rs b/tests/compile-fail/transmute-pair-undef.rs new file mode 100644 index 0000000000000..acc6098af7ee0 --- /dev/null +++ b/tests/compile-fail/transmute-pair-undef.rs @@ -0,0 +1,20 @@ +#![feature(core_intrinsics)] + +use std::mem; + +fn main() { + let x: Option<Box<usize>> = unsafe { + let z = std::intrinsics::add_with_overflow(0usize, 0usize); + std::mem::transmute::<(usize, bool), Option<Box<usize>>>(z) + }; + let y = &x; + // Now read this bytewise. There should be (ptr_size+1) def bytes followed by (ptr_size-1) undef bytes (the padding after the bool) in there.
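+ // Concretely, on a 64-bit target: bytes 0..8 are the usize, byte 8 is the bool, and bytes 9..16 are undef padding; the loop below checks the defined prefix, then the final read of byte 9 must fail.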
+ let z : *const u8 = y as *const _ as *const _; + let first_undef = mem::size_of::<usize>() as isize + 1; + for i in 0..first_undef { + let byte = unsafe { *z.offset(i) }; + assert_eq!(byte, 0); + } + let v = unsafe { *z.offset(first_undef) }; + if v == 0 {} //~ ERROR attempted to read undefined bytes +} diff --git a/tests/compile-fail/transmute_fat.rs b/tests/compile-fail/transmute_fat.rs new file mode 100644 index 0000000000000..6b9e6f876481d --- /dev/null +++ b/tests/compile-fail/transmute_fat.rs @@ -0,0 +1,13 @@ +#![feature(i128_type)] + +fn main() { + #[cfg(target_pointer_width="64")] + let bad = unsafe { + std::mem::transmute::<&[u8], u128>(&[1u8]) + }; + #[cfg(target_pointer_width="32")] + let bad = unsafe { + std::mem::transmute::<&[u8], u64>(&[1u8]) + }; + bad + 1; //~ ERROR a raw memory access tried to access part of a pointer value as raw bytes +} diff --git a/tests/compile-fail/transmute_fat2.rs b/tests/compile-fail/transmute_fat2.rs new file mode 100644 index 0000000000000..028ed613eee71 --- /dev/null +++ b/tests/compile-fail/transmute_fat2.rs @@ -0,0 +1,13 @@ +#![feature(i128_type)] + +fn main() { + #[cfg(target_pointer_width="64")] + let bad = unsafe { + std::mem::transmute::<u128, &[u8]>(42) + }; + #[cfg(target_pointer_width="32")] + let bad = unsafe { + std::mem::transmute::<u64, &[u8]>(42) + }; + bad[0]; //~ ERROR index out of bounds: the len is 0 but the index is 0 +} diff --git a/tests/compile-fail/unaligned_ptr_cast.rs b/tests/compile-fail/unaligned_ptr_cast.rs new file mode 100644 index 0000000000000..fcab430f8fcbc --- /dev/null +++ b/tests/compile-fail/unaligned_ptr_cast.rs @@ -0,0 +1,6 @@ +fn main() { + let x = &2u16; + let x = x as *const _ as *const u32; + // This must fail because alignment is violated + let _ = unsafe { &*x }; //~ ERROR: tried to access memory with alignment 2, but alignment 4 is required +} diff --git a/tests/compile-fail/wild_pointer_deref.rs b/tests/compile-fail/wild_pointer_deref.rs new file mode 100644 index 0000000000000..373e308e1c02d --- /dev/null +++ b/tests/compile-fail/wild_pointer_deref.rs @@ -0,0 +1,5 @@ +fn main() { + let p = 42 as *const i32; + let x = unsafe { *p }; //~ ERROR: a memory access tried to interpret some bytes as a pointer + panic!("this should never print: {}", x); +} diff --git a/tests/compile-fail/zst.rs b/tests/compile-fail/zst.rs new file mode 100644 index 0000000000000..3439824047943 --- /dev/null +++ b/tests/compile-fail/zst.rs @@ -0,0 +1,4 @@ +fn main() { + let x = &() as *const () as *const i32; + let _ = unsafe { *x }; //~ ERROR: tried to access memory with alignment 1, but alignment 4 is required +} diff --git a/tests/compile-fail/zst2.rs b/tests/compile-fail/zst2.rs new file mode 100644 index 0000000000000..dd826c2fd74eb --- /dev/null +++ b/tests/compile-fail/zst2.rs @@ -0,0 +1,9 @@ +// error-pattern: the evaluated program panicked + +#[derive(Debug)] +struct A; + +fn main() { + // can't use assert_eq, b/c that will try to print the pointer addresses with full MIR enabled + assert!(&A as *const A as *const () == &() as *const _) +} diff --git a/tests/compile-fail/zst3.rs b/tests/compile-fail/zst3.rs new file mode 100644 index 0000000000000..53c42995b8a12 --- /dev/null +++ b/tests/compile-fail/zst3.rs @@ -0,0 +1,9 @@ +// error-pattern: the evaluated program panicked + +#[derive(Debug)] +struct A; + +fn main() { + // can't use assert_eq, b/c that will try to print the pointer addresses with full MIR enabled + assert!(&A as *const A == &A as *const A); +} diff --git a/tests/compiletest.rs b/tests/compiletest.rs new file mode 100644 index 
0000000000000..fbf8dbd8c92cc --- /dev/null +++ b/tests/compiletest.rs @@ -0,0 +1,259 @@ +extern crate compiletest_rs as compiletest; + +use std::path::{PathBuf, Path}; +use std::io::Write; + +macro_rules! eprintln { + ($($arg:tt)*) => { + let stderr = std::io::stderr(); + writeln!(stderr.lock(), $($arg)*).unwrap(); + } +} + +fn compile_fail(sysroot: &Path, path: &str, target: &str, host: &str, fullmir: bool) { + eprintln!("## Running compile-fail tests in {} against miri for target {}", path, target); + let mut config = compiletest::default_config(); + config.mode = "compile-fail".parse().expect("Invalid mode"); + config.rustc_path = "target/debug/miri".into(); + if fullmir { + if host != target { + // skip fullmir on nonhost + return; + } + let sysroot = Path::new(&std::env::var("HOME").unwrap()).join(".xargo").join("HOST"); + config.target_rustcflags = Some(format!("--sysroot {}", sysroot.to_str().unwrap())); + config.src_base = PathBuf::from(path.to_string()); + } else { + config.target_rustcflags = Some(format!("--sysroot {}", sysroot.to_str().unwrap())); + config.src_base = PathBuf::from(path.to_string()); + } + config.target = target.to_owned(); + compiletest::run_tests(&config); +} + +fn run_pass(path: &str) { + eprintln!("## Running run-pass tests in {} against rustc", path); + let mut config = compiletest::default_config(); + config.mode = "run-pass".parse().expect("Invalid mode"); + config.src_base = PathBuf::from(path); + config.target_rustcflags = Some("-Dwarnings".to_string()); + config.host_rustcflags = Some("-Dwarnings".to_string()); + compiletest::run_tests(&config); +} + +fn miri_pass(path: &str, target: &str, host: &str, fullmir: bool) { + eprintln!("## Running run-pass tests in {} against miri for target {}", path, target); + let mut config = compiletest::default_config(); + config.mode = "mir-opt".parse().expect("Invalid mode"); + config.src_base = PathBuf::from(path); + config.target = target.to_owned(); + config.host = host.to_owned(); + config.rustc_path = PathBuf::from("target/debug/miri"); + if fullmir { + if host != target { + // skip fullmir on nonhost + return; + } + let sysroot = Path::new(&std::env::var("HOME").unwrap()).join(".xargo").join("HOST"); + config.target_rustcflags = Some(format!("--sysroot {}", sysroot.to_str().unwrap())); + } + // don't actually execute the final binary, it might be for other targets and we only care + // about running miri, not the binary. 
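+ // (A best-effort reading of the trick on the next line: compiletest prefixes the test binary's command line with the runtool, so the shell evaluates `echo "" || <binary>`; the echo succeeds and the `||` short-circuits, so the binary itself never runs.)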
+ config.runtool = Some("echo \"\" || ".to_owned()); + if target == host { + std::env::set_var("MIRI_HOST_TARGET", "yes"); + } + compiletest::run_tests(&config); + std::env::set_var("MIRI_HOST_TARGET", ""); +} + +fn is_target_dir<P: Into<PathBuf>>(path: P) -> bool { + let mut path = path.into(); + path.push("lib"); + path.metadata().map(|m| m.is_dir()).unwrap_or(false) +} + +fn for_all_targets<F: FnMut(String)>(sysroot: &Path, mut f: F) { + let target_dir = sysroot.join("lib").join("rustlib"); + for entry in std::fs::read_dir(target_dir).expect("invalid sysroot") { + let entry = entry.unwrap(); + if !is_target_dir(entry.path()) { continue; } + let target = entry.file_name().into_string().unwrap(); + f(target); + } +} + +#[test] +fn compile_test() { + let sysroot = std::env::var("MIRI_SYSROOT").unwrap_or_else(|_| { + let sysroot = std::process::Command::new("rustc") + .arg("--print") + .arg("sysroot") + .output() + .expect("rustc not found") + .stdout; + String::from_utf8(sysroot).expect("sysroot is not utf8") + }); + let sysroot = &Path::new(sysroot.trim()); + let host = std::process::Command::new("rustc") + .arg("-vV") + .output() + .expect("rustc not found for -vV") + .stdout; + let host = std::str::from_utf8(&host).expect("sysroot is not utf8"); + let host = host.split("\nhost: ").nth(1).expect("no host: part in rustc -vV"); + let host = host.split('\n').next().expect("no \n after host"); + + if let Ok(path) = std::env::var("MIRI_RUSTC_TEST") { + let mut mir_not_found = Vec::new(); + let mut crate_not_found = Vec::new(); + let mut success = 0; + let mut failed = Vec::new(); + let mut c_abi_fns = Vec::new(); + let mut abi = Vec::new(); + let mut unsupported = Vec::new(); + let mut unimplemented_intrinsic = Vec::new(); + let mut limits = Vec::new(); + let mut files: Vec<_> = std::fs::read_dir(path).unwrap().collect(); + while let Some(file) = files.pop() { + let file = file.unwrap(); + let path = file.path(); + if file.metadata().unwrap().is_dir() { + if !path.to_str().unwrap().ends_with("auxiliary") { + // add subdirs recursively + files.extend(std::fs::read_dir(path).unwrap()); + } + continue; + } + if !file.metadata().unwrap().is_file() || !path.to_str().unwrap().ends_with(".rs") { + continue; + } + let stderr = std::io::stderr(); + write!(stderr.lock(), "test [miri-pass] {} ... 
", path.display()).unwrap(); + let mut cmd = std::process::Command::new("target/debug/miri"); + cmd.arg(path); + let libs = Path::new(&sysroot).join("lib"); + let sysroot = libs.join("rustlib").join(&host).join("lib"); + let paths = std::env::join_paths(&[libs, sysroot]).unwrap(); + cmd.env(compiletest::procsrv::dylib_env_var(), paths); + cmd.env("MIRI_SYSROOT", Path::new(&std::env::var("HOME").unwrap()).join(".xargo").join("HOST")); + + match cmd.output() { + Ok(ref output) if output.status.success() => { + success += 1; + writeln!(stderr.lock(), "ok").unwrap() + }, + Ok(output) => { + let output_err = std::str::from_utf8(&output.stderr).unwrap(); + if let Some(text) = output_err.splitn(2, "no mir for `").nth(1) { + let end = text.find('`').unwrap(); + mir_not_found.push(text[..end].to_string()); + writeln!(stderr.lock(), "NO MIR FOR `{}`", &text[..end]).unwrap(); + } else if let Some(text) = output_err.splitn(2, "can't find crate for `").nth(1) { + let end = text.find('`').unwrap(); + crate_not_found.push(text[..end].to_string()); + writeln!(stderr.lock(), "CAN'T FIND CRATE FOR `{}`", &text[..end]).unwrap(); + } else { + for text in output_err.split("error: ").skip(1) { + let end = text.find('\n').unwrap_or(text.len()); + let c_abi = "can't call C ABI function: "; + let unimplemented_intrinsic_s = "unimplemented intrinsic: "; + let unsupported_s = "miri does not support "; + let abi_s = "can't handle function with "; + let limit_s = "reached the configured maximum "; + if text.starts_with(c_abi) { + c_abi_fns.push(text[c_abi.len()..end].to_string()); + } else if text.starts_with(unimplemented_intrinsic_s) { + unimplemented_intrinsic.push(text[unimplemented_intrinsic_s.len()..end].to_string()); + } else if text.starts_with(unsupported_s) { + unsupported.push(text[unsupported_s.len()..end].to_string()); + } else if text.starts_with(abi_s) { + abi.push(text[abi_s.len()..end].to_string()); + } else if text.starts_with(limit_s) { + limits.push(text[limit_s.len()..end].to_string()); + } else if text.find("aborting").is_none() { + failed.push(text[..end].to_string()); + } + } + writeln!(stderr.lock(), "FAILED with exit code {:?}", output.status.code()).unwrap(); + writeln!(stderr.lock(), "stdout: \n {}", std::str::from_utf8(&output.stdout).unwrap()).unwrap(); + writeln!(stderr.lock(), "stderr: \n {}", output_err).unwrap(); + } + } + Err(e) => { + writeln!(stderr.lock(), "FAILED: {}", e).unwrap(); + panic!("failed to execute miri"); + }, + } + } + let stderr = std::io::stderr(); + let mut stderr = stderr.lock(); + writeln!(stderr, "{} success, {} no mir, {} crate not found, {} failed, \ + {} C fn, {} ABI, {} unsupported, {} intrinsic", + success, mir_not_found.len(), crate_not_found.len(), failed.len(), + c_abi_fns.len(), abi.len(), unsupported.len(), unimplemented_intrinsic.len()).unwrap(); + writeln!(stderr, "# The \"other reasons\" errors").unwrap(); + writeln!(stderr, "(sorted, deduplicated)").unwrap(); + print_vec(&mut stderr, failed); + + writeln!(stderr, "# can't call C ABI function").unwrap(); + print_vec(&mut stderr, c_abi_fns); + + writeln!(stderr, "# unsupported ABI").unwrap(); + print_vec(&mut stderr, abi); + + writeln!(stderr, "# unsupported").unwrap(); + print_vec(&mut stderr, unsupported); + + writeln!(stderr, "# unimplemented intrinsics").unwrap(); + print_vec(&mut stderr, unimplemented_intrinsic); + + writeln!(stderr, "# mir not found").unwrap(); + print_vec(&mut stderr, mir_not_found); + + writeln!(stderr, "# crate not found").unwrap(); + print_vec(&mut stderr, 
crate_not_found); + + panic!("ran miri on rustc test suite. Test failing for convenience"); + } else { + run_pass("tests/run-pass"); + run_pass("tests/run-pass-fullmir"); + for_all_targets(sysroot, |target| { + miri_pass("tests/run-pass", &target, host, false); + compile_fail(sysroot, "tests/compile-fail", &target, host, false); + }); + miri_pass("tests/run-pass-fullmir", host, host, true); + compile_fail(sysroot, "tests/compile-fail-fullmir", host, host, true); + } +} + +fn print_vec<W: std::io::Write>(stderr: &mut W, v: Vec<String>) { + writeln!(stderr, "```").unwrap(); + for (n, s) in vec_to_hist(v).into_iter().rev() { + writeln!(stderr, "{:4} {}", n, s).unwrap(); + } + writeln!(stderr, "```").unwrap(); +} + +fn vec_to_hist<T: PartialEq + Ord>(mut v: Vec<T>) -> Vec<(usize, T)> { + v.sort(); + let mut v = v.into_iter(); + let mut result = Vec::new(); + let mut current = v.next(); + 'outer: while let Some(current_val) = current { + let mut n = 1; + for next in &mut v { + if next == current_val { + n += 1; + } else { + result.push((n, current_val)); + current = Some(next); + continue 'outer; + } + } + result.push((n, current_val)); + break; + } + result.sort(); + result +} diff --git a/tests/run-pass-fullmir/foreign-fn-linkname.rs b/tests/run-pass-fullmir/foreign-fn-linkname.rs new file mode 100644 index 0000000000000..b569cd0a66291 --- /dev/null +++ b/tests/run-pass-fullmir/foreign-fn-linkname.rs @@ -0,0 +1,38 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + + +#![feature(libc)] + +extern crate libc; +use std::ffi::CString; + +mod mlibc { + use libc::{c_char, size_t}; + + extern { + #[link_name = "strlen"] + pub fn my_strlen(str: *const c_char) -> size_t; + } +} + +fn strlen(str: String) -> usize { + // C string is terminated with a zero + let s = CString::new(str).unwrap(); + unsafe { + mlibc::my_strlen(s.as_ptr()) as usize + } +} + +pub fn main() { + let len = strlen("Rust".to_string()); + assert_eq!(len, 4); +} diff --git a/tests/run-pass-fullmir/hashmap.rs b/tests/run-pass-fullmir/hashmap.rs new file mode 100644 index 0000000000000..f4a358174f555 --- /dev/null +++ b/tests/run-pass-fullmir/hashmap.rs @@ -0,0 +1,25 @@ +use std::collections::{self, HashMap}; +use std::hash::BuildHasherDefault; + +fn main() { + let mut map : HashMap<i32, i32, BuildHasherDefault<collections::hash_map::DefaultHasher>> = Default::default(); + map.insert(0, 0); + assert_eq!(map.values().fold(0, |x, y| x+y), 0); + + let table_base = map.get(&0).unwrap() as *const _; + + let num = 22; // large enough to trigger a resize + for i in 1..num { + map.insert(i, i); + } + assert!(table_base != map.get(&0).unwrap() as *const _); // make sure relocation happened + assert_eq!(map.values().fold(0, |x, y| x+y), num*(num-1)/2); // check the right things are in the table now + + // Inserting again replaces the existing entries + for i in 0..num { + map.insert(i, num-1-i); + } + assert_eq!(map.values().fold(0, |x, y| x+y), num*(num-1)/2); + + // TODO: Test Entry API +} diff --git a/tests/run-pass-fullmir/heap.rs b/tests/run-pass-fullmir/heap.rs new file mode 100644 index 0000000000000..b533f91646988 --- /dev/null +++ b/tests/run-pass-fullmir/heap.rs @@ -0,0 +1,34 @@ +#![feature(box_syntax)] + +fn make_box() -> Box<(i16, i16)> { + Box::new((1, 2)) +} + +fn make_box_syntax() -> Box<(i16, i16)> { + box (1, 2) +} + +fn 
allocate_reallocate() { + let mut s = String::new(); + + // 6 byte heap alloc (__rust_allocate) + s.push_str("foobar"); + assert_eq!(s.len(), 6); + assert_eq!(s.capacity(), 6); + + // heap size doubled to 12 (__rust_reallocate) + s.push_str("baz"); + assert_eq!(s.len(), 9); + assert_eq!(s.capacity(), 12); + + // heap size reduced to 9 (__rust_reallocate) + s.shrink_to_fit(); + assert_eq!(s.len(), 9); + assert_eq!(s.capacity(), 9); +} + +fn main() { + assert_eq!(*make_box(), (1, 2)); + assert_eq!(*make_box_syntax(), (1, 2)); + allocate_reallocate(); +} diff --git a/tests/run-pass-fullmir/integer-ops.rs b/tests/run-pass-fullmir/integer-ops.rs new file mode 100644 index 0000000000000..3773e699ddf36 --- /dev/null +++ b/tests/run-pass-fullmir/integer-ops.rs @@ -0,0 +1,167 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::i32; + +pub fn main() { + assert_eq!(i8::min_value(), -128); + + assert_eq!(i8::max_value(), 127); + + assert_eq!(i32::from_str_radix("A", 16), Ok(10)); + + let n = -0b1000_0000i8; + assert_eq!(n.count_ones(), 1); + + let n = -0b1000_0000i8; + assert_eq!(n.count_zeros(), 7); + + let n = -1i16; + assert_eq!(n.leading_zeros(), 0); + + let n = -4i8; + assert_eq!(n.trailing_zeros(), 2); + + let n = 0x0123456789ABCDEFi64; + let m = -0x76543210FEDCBA99i64; + assert_eq!(n.rotate_left(32), m); + + let n = 0x0123456789ABCDEFi64; + let m = -0xFEDCBA987654322i64; + assert_eq!(n.rotate_right(4), m); + + let n = 0x0123456789ABCDEFi64; + let m = -0x1032547698BADCFFi64; + assert_eq!(n.swap_bytes(), m); + + let n = 0x0123456789ABCDEFi64; + if cfg!(target_endian = "big") { + assert_eq!(i64::from_be(n), n) + } else { + assert_eq!(i64::from_be(n), n.swap_bytes()) + } + + let n = 0x0123456789ABCDEFi64; + if cfg!(target_endian = "little") { + assert_eq!(i64::from_le(n), n) + } else { + assert_eq!(i64::from_le(n), n.swap_bytes()) + } + + let n = 0x0123456789ABCDEFi64; + if cfg!(target_endian = "big") { + assert_eq!(n.to_be(), n) + } else { + assert_eq!(n.to_be(), n.swap_bytes()) + } + + let n = 0x0123456789ABCDEFi64; + if cfg!(target_endian = "little") { + assert_eq!(n.to_le(), n) + } else { + assert_eq!(n.to_le(), n.swap_bytes()) + } + + assert_eq!(7i16.checked_add(32760), Some(32767)); + assert_eq!(8i16.checked_add(32760), None); + + assert_eq!((-127i8).checked_sub(1), Some(-128)); + assert_eq!((-128i8).checked_sub(1), None); + + assert_eq!(6i8.checked_mul(21), Some(126)); + assert_eq!(6i8.checked_mul(22), None); + + assert_eq!((-127i8).checked_div(-1), Some(127)); + assert_eq!((-128i8).checked_div(-1), None); + assert_eq!((1i8).checked_div(0), None); + + assert_eq!(5i32.checked_rem(2), Some(1)); + assert_eq!(5i32.checked_rem(0), None); + assert_eq!(i32::MIN.checked_rem(-1), None); + + assert_eq!(5i32.checked_neg(), Some(-5)); + assert_eq!(i32::MIN.checked_neg(), None); + + assert_eq!(0x10i32.checked_shl(4), Some(0x100)); + assert_eq!(0x10i32.checked_shl(33), None); + + assert_eq!(0x10i32.checked_shr(4), Some(0x1)); + assert_eq!(0x10i32.checked_shr(33), None); + + assert_eq!((-5i32).checked_abs(), Some(5)); + assert_eq!(i32::MIN.checked_abs(), None); + + assert_eq!(100i8.saturating_add(1), 101); + assert_eq!(100i8.saturating_add(127), 127); + + 
assert_eq!(100i8.saturating_sub(127), -27); + assert_eq!((-100i8).saturating_sub(127), -128); + + assert_eq!(100i32.saturating_mul(127), 12700); + assert_eq!((1i32 << 23).saturating_mul(1 << 23), i32::MAX); + assert_eq!((-1i32 << 23).saturating_mul(1 << 23), i32::MIN); + + assert_eq!(100i8.wrapping_add(27), 127); + assert_eq!(100i8.wrapping_add(127), -29); + + assert_eq!(0i8.wrapping_sub(127), -127); + assert_eq!((-2i8).wrapping_sub(127), 127); + + assert_eq!(10i8.wrapping_mul(12), 120); + assert_eq!(11i8.wrapping_mul(12), -124); + + assert_eq!(100u8.wrapping_div(10), 10); + assert_eq!((-128i8).wrapping_div(-1), -128); + + assert_eq!(100i8.wrapping_rem(10), 0); + assert_eq!((-128i8).wrapping_rem(-1), 0); + + assert_eq!(100i8.wrapping_neg(), -100); + assert_eq!((-128i8).wrapping_neg(), -128); + + assert_eq!((-1i8).wrapping_shl(7), -128); + assert_eq!((-1i8).wrapping_shl(8), -1); + + assert_eq!((-128i8).wrapping_shr(7), -1); + assert_eq!((-128i8).wrapping_shr(8), -128); + + assert_eq!(100i8.wrapping_abs(), 100); + assert_eq!((-100i8).wrapping_abs(), 100); + assert_eq!((-128i8).wrapping_abs(), -128); + assert_eq!((-128i8).wrapping_abs() as u8, 128); + + assert_eq!(5i32.overflowing_add(2), (7, false)); + assert_eq!(i32::MAX.overflowing_add(1), (i32::MIN, true)); + + assert_eq!(5i32.overflowing_sub(2), (3, false)); + assert_eq!(i32::MIN.overflowing_sub(1), (i32::MAX, true)); + + assert_eq!(5i32.overflowing_mul(2), (10, false)); + assert_eq!(1_000_000_000i32.overflowing_mul(10), (1410065408, true)); + + assert_eq!(5i32.overflowing_div(2), (2, false)); + assert_eq!(i32::MIN.overflowing_div(-1), (i32::MIN, true)); + + assert_eq!(5i32.overflowing_rem(2), (1, false)); + assert_eq!(i32::MIN.overflowing_rem(-1), (0, true)); + + assert_eq!(2i32.overflowing_neg(), (-2, false)); + assert_eq!(i32::MIN.overflowing_neg(), (i32::MIN, true)); + + assert_eq!(0x10i32.overflowing_shl(4), (0x100, false)); + assert_eq!(0x10i32.overflowing_shl(36), (0x100, true)); + + assert_eq!(0x10i32.overflowing_shr(4), (0x1, false)); + assert_eq!(0x10i32.overflowing_shr(36), (0x1, true)); + + assert_eq!(10i8.overflowing_abs(), (10,false)); + assert_eq!((-10i8).overflowing_abs(), (10,false)); + assert_eq!((-128i8).overflowing_abs(), (-128,true)); +} diff --git a/tests/run-pass-fullmir/issue-15080.rs b/tests/run-pass-fullmir/issue-15080.rs new file mode 100644 index 0000000000000..cee0caeb465f5 --- /dev/null +++ b/tests/run-pass-fullmir/issue-15080.rs @@ -0,0 +1,33 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +#![feature(slice_patterns)] + +fn main() { + let mut x: &[_] = &[1, 2, 3, 4]; + + let mut result = vec!(); + loop { + x = match *x { + [1, n, 3, ref rest..] => { + result.push(n); + rest + } + [n, ref rest..] => { + result.push(n); + rest + } + [] => + break + } + } + assert_eq!(result, [2, 4]); +} diff --git a/tests/run-pass-fullmir/loop-break-value.rs b/tests/run-pass-fullmir/loop-break-value.rs new file mode 100644 index 0000000000000..8631909a2a966 --- /dev/null +++ b/tests/run-pass-fullmir/loop-break-value.rs @@ -0,0 +1,141 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT.
+// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(never_type)] +#![allow(unreachable_code)] + +#[allow(unused)] +fn never_returns() { + loop { + break loop {}; + } +} + +pub fn main() { + let value = 'outer: loop { + if 1 == 1 { + break 13; + } else { + let _never: ! = loop { + break loop { + break 'outer panic!(); + } + }; + } + }; + assert_eq!(value, 13); + + let x = [1, 3u32, 5]; + let y = [17]; + let z = []; + let coerced: &[_] = loop { + match 2 { + 1 => break &x, + 2 => break &y, + 3 => break &z, + _ => (), + } + }; + assert_eq!(coerced, &[17u32]); + + let trait_unified = loop { + break if true { + break Default::default() + } else { + break [13, 14] + }; + }; + assert_eq!(trait_unified, [0, 0]); + + let trait_unified_2 = loop { + if false { + break [String::from("Hello")] + } else { + break Default::default() + }; + }; + assert_eq!(trait_unified_2, [""]); + + let trait_unified_3 = loop { + break if false { + break [String::from("Hello")] + } else { + ["Yes".into()] + }; + }; + assert_eq!(trait_unified_3, ["Yes"]); + + let regular_break = loop { + if true { + break; + } else { + break break Default::default(); + } + }; + assert_eq!(regular_break, ()); + + let regular_break_2 = loop { + if true { + break Default::default(); + } else { + break; + } + }; + assert_eq!(regular_break_2, ()); + + let regular_break_3 = loop { + break if true { + Default::default() + } else { + break; + } + }; + assert_eq!(regular_break_3, ()); + + let regular_break_4 = loop { + break (); + break; + }; + assert_eq!(regular_break_4, ()); + + let regular_break_5 = loop { + break; + break (); + }; + assert_eq!(regular_break_5, ()); + + let nested_break_value = 'outer2: loop { + let _a: u32 = 'inner: loop { + if true { + break 'outer2 "hello"; + } else { + break 'inner 17; + } + }; + panic!(); + }; + assert_eq!(nested_break_value, "hello"); + + let break_from_while_cond = loop { + 'inner_loop: while break 'inner_loop { + panic!(); + } + break 123; + }; + assert_eq!(break_from_while_cond, 123); + + let break_from_while_to_outer = 'outer_loop: loop { + while break 'outer_loop 567 { + panic!("from_inner"); + } + panic!("from outer"); + }; + assert_eq!(break_from_while_to_outer, 567); +} diff --git a/tests/run-pass-fullmir/move-arg-2-unique.rs b/tests/run-pass-fullmir/move-arg-2-unique.rs new file mode 100644 index 0000000000000..d44c83763b7c4 --- /dev/null +++ b/tests/run-pass-fullmir/move-arg-2-unique.rs @@ -0,0 +1,20 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(unused_features, unused_variables)] +#![feature(box_syntax)] + +fn test(foo: Box<Vec<isize>> ) { assert_eq!((*foo)[0], 10); } + +pub fn main() { + let x = box vec![10]; + // Test forgetting a local by move-in + test(x); +} diff --git a/tests/run-pass-fullmir/regions-mock-trans.rs b/tests/run-pass-fullmir/regions-mock-trans.rs new file mode 100644 index 0000000000000..7d9d31b0dda19 --- /dev/null +++ b/tests/run-pass-fullmir/regions-mock-trans.rs @@ -0,0 +1,63 @@ +// Copyright 2012 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// pretty-expanded FIXME #23616 + +#![feature(libc)] + +#![allow(dead_code)] + +extern crate libc; +use std::mem; + +struct Arena(()); + +struct Bcx<'a> { + fcx: &'a Fcx<'a> +} + +struct Fcx<'a> { + arena: &'a Arena, + ccx: &'a Ccx +} + +struct Ccx { + x: isize +} + +fn alloc<'a>(_bcx : &'a Arena) -> &'a Bcx<'a> { + unsafe { + mem::transmute(libc::malloc(mem::size_of::<Bcx>() + as libc::size_t)) + } +} + +fn h<'a>(bcx : &'a Bcx<'a>) -> &'a Bcx<'a> { + return alloc(bcx.fcx.arena); +} + +fn g(fcx : &Fcx) { + let bcx = Bcx { fcx: fcx }; + let bcx2 = h(&bcx); + unsafe { + libc::free(mem::transmute(bcx2)); + } +} + +fn f(ccx : &Ccx) { + let a = Arena(()); + let fcx = Fcx { arena: &a, ccx: ccx }; + return g(&fcx); +} + +pub fn main() { + let ccx = Ccx { x: 0 }; + f(&ccx); +} diff --git a/tests/run-pass-fullmir/u128.rs b/tests/run-pass-fullmir/u128.rs new file mode 100644 index 0000000000000..a05308acbe676 --- /dev/null +++ b/tests/run-pass-fullmir/u128.rs @@ -0,0 +1,77 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(i128_type)] + +fn b<T>(t: T) -> T { t } + +fn main() { + let x: u128 = 0xFFFF_FFFF_FFFF_FFFF__FFFF_FFFF_FFFF_FFFF; + assert_eq!(0, !x); + assert_eq!(0, !x); + let y: u128 = 0xFFFF_FFFF_FFFF_FFFF__FFFF_FFFF_FFFF_FFFE; + assert_eq!(!1, y); + assert_eq!(x, y | 1); + assert_eq!(0xFAFF_0000_FF8F_0000__FFFF_0000_FFFF_FFFE, + y & + 0xFAFF_0000_FF8F_0000__FFFF_0000_FFFF_FFFF); + let z: u128 = 0xABCD_EF; + assert_eq!(z * z, 0x734C_C2F2_A521); + assert_eq!(z * z * z * z, 0x33EE_0E2A_54E2_59DA_A0E7_8E41); + assert_eq!(z + z + z + z, 0x2AF3_7BC); + let k: u128 = 0x1234_5678_9ABC_DEFF_EDCB_A987_6543_210; + assert_eq!(k + k, 0x2468_ACF1_3579_BDFF_DB97_530E_CA86_420); + assert_eq!(0, k - k); + assert_eq!(0x1234_5678_9ABC_DEFF_EDCB_A987_5A86_421, k - z); + assert_eq!(0x1000_0000_0000_0000_0000_0000_0000_000, + k - 0x234_5678_9ABC_DEFF_EDCB_A987_6543_210); + assert_eq!(0x6EF5_DE4C_D3BC_2AAA_3BB4_CC5D_D6EE_8, k / 42); + assert_eq!(0, k % 42); + assert_eq!(15, z % 42); + assert_eq!(0x169D_A8020_CEC18, k % 0x3ACB_FE49_FF24_AC); + assert_eq!(0x91A2_B3C4_D5E6_F7, k >> 65); + assert_eq!(0xFDB9_7530_ECA8_6420_0000_0000_0000_0000, k << 65); + assert!(k > z); + assert!(y > k); + assert!(y < x); + assert_eq!(x as u64, !0); + assert_eq!(z as u64, 0xABCD_EF); + assert_eq!(k as u64, 0xFEDC_BA98_7654_3210); + assert_eq!(k as i128, 0x1234_5678_9ABC_DEFF_EDCB_A987_6543_210); + assert_eq!((z as f64) as u128, z); + assert_eq!((z as f32) as u128, z); + assert_eq!((z as f64 * 16.0) as u128, z * 16); + assert_eq!((z as f32 * 16.0) as u128, z * 16); + let l :u128 = 432 << 100; + assert_eq!((l as f32) as u128, l); + assert_eq!((l as f64) as u128, l); + // formatting + let j: u128 = 1 << 67; + assert_eq!("147573952589676412928", format!("{}", j)); + assert_eq!("80000000000000000", format!("{:x}", j)); + assert_eq!("20000000000000000000000", format!("{:o}", j)); + 
assert_eq!("10000000000000000000000000000000000000000000000000000000000000000000", + format!("{:b}", j)); + assert_eq!("340282366920938463463374607431768211455", + format!("{}", u128::max_value())); + assert_eq!("147573952589676412928", format!("{:?}", j)); + // common traits + assert_eq!(x, b(x.clone())); + // overflow checks + assert_eq!((z).checked_mul(z), Some(0x734C_C2F2_A521)); + assert_eq!((k).checked_mul(k), None); + let l: u128 = b(u128::max_value() - 10); + let o: u128 = b(17); + assert_eq!(l.checked_add(b(11)), None); + assert_eq!(l.checked_sub(l), Some(0)); + assert_eq!(o.checked_sub(b(18)), None); + assert_eq!(b(1u128).checked_shl(b(127)), Some(1 << 127)); + assert_eq!(o.checked_shl(b(128)), None); +} diff --git a/tests/run-pass-fullmir/vecs.rs b/tests/run-pass-fullmir/vecs.rs new file mode 100644 index 0000000000000..fd3a00031d61e --- /dev/null +++ b/tests/run-pass-fullmir/vecs.rs @@ -0,0 +1,42 @@ +fn make_vec() -> Vec<u8> { + let mut v = Vec::with_capacity(4); + v.push(1); + v.push(2); + v +} + +fn make_vec_macro() -> Vec<u8> { + vec![1, 2] +} + +fn make_vec_macro_repeat() -> Vec<u8> { + vec![42; 5] +} + +fn make_vec_macro_repeat_zeroed() -> Vec<u8> { + vec![0; 7] +} + +fn vec_into_iter() -> u8 { + vec![1, 2, 3, 4] + .into_iter() + .map(|x| x * x) + .fold(0, |x, y| x + y) +} + +fn vec_reallocate() -> Vec<u8> { + let mut v = vec![1, 2]; + v.push(3); + v.push(4); + v.push(5); + v +} + +fn main() { + assert_eq!(vec_reallocate().len(), 5); + assert_eq!(vec_into_iter(), 30); + assert_eq!(make_vec().capacity(), 4); + assert_eq!(make_vec_macro(), [1, 2]); + assert_eq!(make_vec_macro_repeat(), [42; 5]); + assert_eq!(make_vec_macro_repeat_zeroed(), [0; 7]); +} diff --git a/tests/run-pass/arrays.rs b/tests/run-pass/arrays.rs new file mode 100644 index 0000000000000..469dde3091eb2 --- /dev/null +++ b/tests/run-pass/arrays.rs @@ -0,0 +1,45 @@ +fn empty_array() -> [u16; 0] { + [] +} + +fn mini_array() -> [u16; 1] { + [42] +} + +fn big_array() -> [u16; 5] { + [5, 4, 3, 2, 1] +} + +fn array_array() -> [[u8; 2]; 3] { + [[5, 4], [3, 2], [1, 0]] +} + +fn index_unsafe() -> i32 { + let a = [0, 10, 20, 30]; + unsafe { *a.get_unchecked(2) } +} + +fn index() -> i32 { + let a = [0, 10, 20, 30]; + a[2] +} + +fn array_repeat() -> [u8; 8] { + [42; 8] +} + +fn slice_index() -> u8 { + let arr: &[_] = &[101, 102, 103, 104, 105, 106]; + arr[5] +} + +fn main() { + assert_eq!(empty_array(), []); + assert_eq!(index_unsafe(), 20); + assert_eq!(index(), 20); + assert_eq!(slice_index(), 106); + assert_eq!(big_array(), [5, 4, 3, 2, 1]); + assert_eq!(array_array(), [[5, 4], [3, 2], [1, 0]]); + assert_eq!(array_repeat(), [42; 8]); + assert_eq!(mini_array(), [42]); +} diff --git a/tests/run-pass/associated-const.rs b/tests/run-pass/associated-const.rs new file mode 100644 index 0000000000000..fe5da49f807d5 --- /dev/null +++ b/tests/run-pass/associated-const.rs @@ -0,0 +1,21 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
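+// (Note on the assertion in main below: it uses the fully qualified form, <i32 as Foo>::ID, so there is no ambiguity about which impl supplies ID.)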
+ +trait Foo { + const ID: i32; +} + +impl Foo for i32 { + const ID: i32 = 1; +} + +fn main() { + assert_eq!(1, <i32 as Foo>::ID); +} diff --git a/tests/run-pass/assume_bug.rs b/tests/run-pass/assume_bug.rs new file mode 100644 index 0000000000000..e14f875c022e3 --- /dev/null +++ b/tests/run-pass/assume_bug.rs @@ -0,0 +1,3 @@ +fn main() { + vec![()].into_iter(); +} diff --git a/tests/run-pass/atomic-access-bool.rs b/tests/run-pass/atomic-access-bool.rs new file mode 100644 index 0000000000000..ada584705401f --- /dev/null +++ b/tests/run-pass/atomic-access-bool.rs @@ -0,0 +1,30 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT}; +use std::sync::atomic::Ordering::*; + +static mut ATOMIC: AtomicBool = ATOMIC_BOOL_INIT; + +fn main() { + unsafe { + assert_eq!(*ATOMIC.get_mut(), false); + ATOMIC.store(true, SeqCst); + assert_eq!(*ATOMIC.get_mut(), true); + ATOMIC.fetch_or(false, SeqCst); + assert_eq!(*ATOMIC.get_mut(), true); + ATOMIC.fetch_and(false, SeqCst); + assert_eq!(*ATOMIC.get_mut(), false); + ATOMIC.fetch_nand(true, SeqCst); + assert_eq!(*ATOMIC.get_mut(), true); + ATOMIC.fetch_xor(true, SeqCst); + assert_eq!(*ATOMIC.get_mut(), false); + } +} diff --git a/tests/run-pass/atomic-compare_exchange.rs b/tests/run-pass/atomic-compare_exchange.rs new file mode 100644 index 0000000000000..61e9a96588966 --- /dev/null +++ b/tests/run-pass/atomic-compare_exchange.rs @@ -0,0 +1,36 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
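+// (One detail worth noting about the list below: failure orderings of Release or AcqRel are rejected by compare_exchange's API contract, which is why no such pairs appear.)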
+ +use std::sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT}; +use std::sync::atomic::Ordering::*; + +static ATOMIC: AtomicIsize = ATOMIC_ISIZE_INIT; + +fn main() { + // Make sure trans can emit all the intrinsics correctly + ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed).ok(); + ATOMIC.compare_exchange(0, 1, Acquire, Relaxed).ok(); + ATOMIC.compare_exchange(0, 1, Release, Relaxed).ok(); + ATOMIC.compare_exchange(0, 1, AcqRel, Relaxed).ok(); + ATOMIC.compare_exchange(0, 1, SeqCst, Relaxed).ok(); + ATOMIC.compare_exchange(0, 1, Acquire, Acquire).ok(); + ATOMIC.compare_exchange(0, 1, AcqRel, Acquire).ok(); + ATOMIC.compare_exchange(0, 1, SeqCst, Acquire).ok(); + ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst).ok(); + ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed).ok(); + ATOMIC.compare_exchange_weak(0, 1, Acquire, Relaxed).ok(); + ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed).ok(); + ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed).ok(); + ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed).ok(); + ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire).ok(); + ATOMIC.compare_exchange_weak(0, 1, AcqRel, Acquire).ok(); + ATOMIC.compare_exchange_weak(0, 1, SeqCst, Acquire).ok(); + ATOMIC.compare_exchange_weak(0, 1, SeqCst, SeqCst).ok(); +} diff --git a/tests/run-pass/aux_test.rs b/tests/run-pass/aux_test.rs new file mode 100644 index 0000000000000..beed82e058029 --- /dev/null +++ b/tests/run-pass/aux_test.rs @@ -0,0 +1,9 @@ +// aux-build:dep.rs + +// ignore-cross-compile + +extern crate dep; + +fn main() { + dep::foo(); +} diff --git a/tests/run-pass/auxiliary/dep.rs b/tests/run-pass/auxiliary/dep.rs new file mode 100644 index 0000000000000..b76b4321d62aa --- /dev/null +++ b/tests/run-pass/auxiliary/dep.rs @@ -0,0 +1 @@ +pub fn foo() {} diff --git a/tests/run-pass/bad_substs.rs b/tests/run-pass/bad_substs.rs new file mode 100644 index 0000000000000..d8da2de5d6df9 --- /dev/null +++ b/tests/run-pass/bad_substs.rs @@ -0,0 +1,4 @@ +fn main() { + let f: fn(i32) -> Option<i32> = Some::<i32>; + f(42); +} diff --git a/tests/run-pass/binops.rs b/tests/run-pass/binops.rs new file mode 100644 index 0000000000000..a03b96fa499fd --- /dev/null +++ b/tests/run-pass/binops.rs @@ -0,0 +1,91 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// Binop corner cases + +fn test_nil() { + assert_eq!((), ()); + assert!((!(() != ()))); + assert!((!(() < ()))); + assert!((() <= ())); + assert!((!(() > ()))); + assert!((() >= ())); +} + +fn test_bool() { + assert!((!(true < false))); + assert!((!(true <= false))); + assert!((true > false)); + assert!((true >= false)); + + assert!((false < true)); + assert!((false <= true)); + assert!((!(false > true))); + assert!((!(false >= true))); + + // Bools support bitwise binops + assert_eq!(false & false, false); + assert_eq!(true & false, false); + assert_eq!(true & true, true); + assert_eq!(false | false, false); + assert_eq!(true | false, true); + assert_eq!(true | true, true); + assert_eq!(false ^ false, false); + assert_eq!(true ^ false, true); + assert_eq!(true ^ true, false); +} + +fn test_ptr() { + unsafe { + let p1: *const u8 = ::std::mem::transmute(0_usize); + let p2: *const u8 = ::std::mem::transmute(0_usize); + let p3: *const u8 = ::std::mem::transmute(1_usize); + + assert_eq!(p1, p2); + assert!(p1 != p3); + assert!(p1 < p3); + assert!(p1 <= p3); + assert!(p3 > p1); + assert!(p3 >= p3); + assert!(p1 <= p2); + assert!(p1 >= p2); + } +} + +#[derive(PartialEq, Debug)] +struct P { + x: isize, + y: isize, +} + +fn p(x: isize, y: isize) -> P { + P { + x: x, + y: y + } +} + +fn test_class() { + let q = p(1, 2); + let mut r = p(1, 2); + + assert_eq!(q, r); + r.y = 17; + assert!((r.y != q.y)); + assert_eq!(r.y, 17); + assert!((q != r)); +} + +pub fn main() { + test_nil(); + test_bool(); + test_ptr(); + test_class(); +} diff --git a/tests/run-pass/bools.rs b/tests/run-pass/bools.rs new file mode 100644 index 0000000000000..103d7eac27cde --- /dev/null +++ b/tests/run-pass/bools.rs @@ -0,0 +1,28 @@ +fn boolean() -> bool { + true +} + +fn if_false() -> i64 { + let c = false; + if c { 1 } else { 0 } +} + +fn if_true() -> i64 { + let c = true; + if c { 1 } else { 0 } +} + +fn match_bool() -> i16 { + let b = true; + match b { + true => 1, + _ => 0, + } +} + +fn main() { + assert!(boolean()); + assert_eq!(if_false(), 0); + assert_eq!(if_true(), 1); + assert_eq!(match_bool(), 1); +} diff --git a/tests/run-pass/box_box_trait.rs b/tests/run-pass/box_box_trait.rs new file mode 100644 index 0000000000000..57eef52d573b9 --- /dev/null +++ b/tests/run-pass/box_box_trait.rs @@ -0,0 +1,29 @@ +#![feature(box_syntax)] + +struct DroppableStruct; + +static mut DROPPED: bool = false; + +impl Drop for DroppableStruct { + fn drop(&mut self) { + unsafe { DROPPED = true; } + } +} + +trait MyTrait { fn dummy(&self) { } } +impl MyTrait for Box<DroppableStruct> {} + +struct Whatever { w: Box<MyTrait+'static> } +impl Whatever { + fn new(w: Box<MyTrait+'static>) -> Whatever { + Whatever { w: w } + } +} + +fn main() { + { + let f: Box<_> = box DroppableStruct; + let _a = Whatever::new(box f as Box<MyTrait>); + } + assert!(unsafe { DROPPED }); +} diff --git a/tests/run-pass/c_enums.rs b/tests/run-pass/c_enums.rs new file mode 100644 index 0000000000000..11897b73eb2ad --- /dev/null +++ b/tests/run-pass/c_enums.rs @@ -0,0 +1,32 @@ +enum Foo { + Bar = 42, + Baz, + Quux = 100, +} + +enum Signed { + Bar = -42, + Baz, + Quux = 100, +} + +fn foo() -> [u8; 3] { + [Foo::Bar as u8, Foo::Baz as u8, Foo::Quux as u8] +} + +fn signed() -> [i8; 3] { + [Signed::Bar as i8, Signed::Baz as i8, Signed::Quux as i8] +} + +fn unsafe_match() -> bool { + match unsafe { std::mem::transmute::<i32, Foo>(43) } { + Foo::Baz => true, + _ => false, + } +} + +fn main() { + assert_eq!(foo(), [42, 43, 100]); + assert_eq!(signed(), [-42, -41, 100]); + assert!(unsafe_match()); +} diff --git 
a/tests/run-pass/call_drop_on_array_elements.rs b/tests/run-pass/call_drop_on_array_elements.rs new file mode 100644 index 0000000000000..c9b59f635e145 --- /dev/null +++ b/tests/run-pass/call_drop_on_array_elements.rs @@ -0,0 +1,22 @@ +struct Bar(u16); // ZSTs are tested separately + +static mut DROP_COUNT: usize = 0; + +impl Drop for Bar { + fn drop(&mut self) { + assert_eq!(self.0 as usize, unsafe { DROP_COUNT }); // tests whether we are called at a valid address + unsafe { DROP_COUNT += 1; } + } +} + +fn main() { + let b = [Bar(0), Bar(1), Bar(2), Bar(3)]; + assert_eq!(unsafe { DROP_COUNT }, 0); + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); + + // check empty case + let b : [Bar; 0] = []; + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); +} diff --git a/tests/run-pass/call_drop_on_fat_ptr_array_elements.rs b/tests/run-pass/call_drop_on_fat_ptr_array_elements.rs new file mode 100644 index 0000000000000..a1ab5c45e358c --- /dev/null +++ b/tests/run-pass/call_drop_on_fat_ptr_array_elements.rs @@ -0,0 +1,20 @@ +trait Foo {} + +struct Bar; + +impl Foo for Bar {} + +static mut DROP_COUNT: usize = 0; + +impl Drop for Bar { + fn drop(&mut self) { + unsafe { DROP_COUNT += 1; } + } +} + +fn main() { + let b: [Box<Foo>; 4] = [Box::new(Bar), Box::new(Bar), Box::new(Bar), Box::new(Bar)]; + assert_eq!(unsafe { DROP_COUNT }, 0); + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); +} diff --git a/tests/run-pass/call_drop_on_zst_array_elements.rs b/tests/run-pass/call_drop_on_zst_array_elements.rs new file mode 100644 index 0000000000000..1887130fdd8a4 --- /dev/null +++ b/tests/run-pass/call_drop_on_zst_array_elements.rs @@ -0,0 +1,21 @@ +struct Bar; + +static mut DROP_COUNT: usize = 0; + +impl Drop for Bar { + fn drop(&mut self) { + unsafe { DROP_COUNT += 1; } + } +} + +fn main() { + let b = [Bar, Bar, Bar, Bar]; + assert_eq!(unsafe { DROP_COUNT }, 0); + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); + + // check empty case + let b : [Bar; 0] = []; + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); +} diff --git a/tests/run-pass/call_drop_through_owned_slice.rs b/tests/run-pass/call_drop_through_owned_slice.rs new file mode 100644 index 0000000000000..3ec6be65ed8b6 --- /dev/null +++ b/tests/run-pass/call_drop_through_owned_slice.rs @@ -0,0 +1,16 @@ +struct Bar; + +static mut DROP_COUNT: usize = 0; + +impl Drop for Bar { + fn drop(&mut self) { + unsafe { DROP_COUNT += 1; } + } +} + +fn main() { + let b: Box<[Bar]> = vec![Bar, Bar, Bar, Bar].into_boxed_slice(); + assert_eq!(unsafe { DROP_COUNT }, 0); + drop(b); + assert_eq!(unsafe { DROP_COUNT }, 4); +} diff --git a/tests/run-pass/call_drop_through_trait_object.rs b/tests/run-pass/call_drop_through_trait_object.rs new file mode 100644 index 0000000000000..9b6acf0b14746 --- /dev/null +++ b/tests/run-pass/call_drop_through_trait_object.rs @@ -0,0 +1,20 @@ +trait Foo {} + +struct Bar; + +static mut DROP_CALLED: bool = false; + +impl Drop for Bar { + fn drop(&mut self) { + unsafe { DROP_CALLED = true; } + } +} + +impl Foo for Bar {} + +fn main() { + let b: Box<Foo> = Box::new(Bar); + assert!(unsafe { !DROP_CALLED }); + drop(b); + assert!(unsafe { DROP_CALLED }); +} diff --git a/tests/run-pass/call_drop_through_trait_object_rc.rs b/tests/run-pass/call_drop_through_trait_object_rc.rs new file mode 100644 index 0000000000000..ce56ca6a1cafd --- /dev/null +++ b/tests/run-pass/call_drop_through_trait_object_rc.rs @@ -0,0 +1,22 @@ +trait Foo {} + +struct Bar; + +static mut DROP_CALLED: bool = false; + +impl Drop for Bar { + fn drop(&mut self) { + unsafe { DROP_CALLED = 
true; } + } +} + +impl Foo for Bar {} + +use std::rc::Rc; + +fn main() { + let b: Rc<Foo> = Rc::new(Bar); + assert!(unsafe { !DROP_CALLED }); + drop(b); + assert!(unsafe { DROP_CALLED }); +} diff --git a/tests/run-pass/calls.rs b/tests/run-pass/calls.rs new file mode 100644 index 0000000000000..c4ba4a9b701ff --- /dev/null +++ b/tests/run-pass/calls.rs @@ -0,0 +1,45 @@ +#![feature(const_fn)] + +fn call() -> i32 { + fn increment(x: i32) -> i32 { + x + 1 + } + increment(1) +} + +fn factorial_recursive() -> i64 { + fn fact(n: i64) -> i64 { + if n == 0 { + 1 + } else { + n * fact(n - 1) + } + } + fact(10) +} + +fn call_generic() -> (i16, bool) { + fn id<T>(t: T) -> T { t } + (id(42), id(true)) +} + +// Test calling a very simple function from the standard library. +fn cross_crate_fn_call() -> i64 { + if 1i32.is_positive() { 1 } else { 0 } +} + +const fn foo(i: i64) -> i64 { *&i + 1 } + +fn const_fn_call() -> i64 { + let x = 5 + foo(5); + assert_eq!(x, 11); + x +} + +fn main() { + assert_eq!(call(), 2); + assert_eq!(factorial_recursive(), 3628800); + assert_eq!(call_generic(), (42, true)); + assert_eq!(cross_crate_fn_call(), 1); + assert_eq!(const_fn_call(), 11); +} diff --git a/tests/run-pass/cast-rfc0401-vtable-kinds.rs b/tests/run-pass/cast-rfc0401-vtable-kinds.rs new file mode 100644 index 0000000000000..3a9f24ad4cc7c --- /dev/null +++ b/tests/run-pass/cast-rfc0401-vtable-kinds.rs @@ -0,0 +1,54 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Check that you can cast between different pointers to trait objects +// whose vtable have the same kind (both lengths, or both trait pointers).
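+// (Concretely, in the code below: round_trip_and_call goes *const Foo<u32> -> *const Foo<u16> -> *mut Foo<u32>, two vtables of the same shape, so the pointer survives the round trip; foo_to_bar recasts FooS<[u32]> to BarS<[u32]>, two fat pointers that both carry a slice length.)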
+ +trait Foo<T> { + fn foo(&self, _: T) -> u32 { 42 } +} + +trait Bar { + fn bar(&self) { println!("Bar!"); } +} + +impl<T> Foo<T> for () {} +impl Foo<u32> for u32 { fn foo(&self, _: u32) -> u32 { self+43 } } +impl Bar for () {} + +unsafe fn round_trip_and_call<'a>(t: *const (Foo<u32>+'a)) -> u32 { + let foo_e : *const Foo<u16> = t as *const _; + let r_1 = foo_e as *mut Foo<u32>; + + (&*r_1).foo(0) +} + +#[repr(C)] +struct FooS<T:?Sized>(T); +#[repr(C)] +struct BarS<T:?Sized>(T); + +fn foo_to_bar<T:?Sized>(u: *const FooS<T>) -> *const BarS<T> { + u as *const BarS<T> +} + +fn main() { + let x = 4u32; + let y : &Foo<u32> = &x; + let fl = unsafe { round_trip_and_call(y as *const Foo<u32>) }; + assert_eq!(fl, (43+4)); + + let s = FooS([0,1,2]); + let u: &FooS<[u32]> = &s; + let u: *const FooS<[u32]> = u; + let bar_ref : *const BarS<[u32]> = foo_to_bar(u); + let z : &BarS<[u32]> = unsafe{&*bar_ref}; + assert_eq!(&z.0, &[0,1,2]); +} diff --git a/tests/run-pass/cast_fn_ptr.rs b/tests/run-pass/cast_fn_ptr.rs new file mode 100644 index 0000000000000..109e8dfc2a02b --- /dev/null +++ b/tests/run-pass/cast_fn_ptr.rs @@ -0,0 +1,9 @@ +fn main() { + fn f(_: *const u8) {} + + let g = unsafe { + std::mem::transmute::<fn(*const u8), fn(*const i32)>(f) + }; + + g(&42 as *const _); +} diff --git a/tests/run-pass/cast_fn_ptr_unsafe.rs b/tests/run-pass/cast_fn_ptr_unsafe.rs new file mode 100644 index 0000000000000..0cabb369bfdd9 --- /dev/null +++ b/tests/run-pass/cast_fn_ptr_unsafe.rs @@ -0,0 +1,8 @@ +fn main() { + fn f() {} + + let g = f as fn() as unsafe fn(); + unsafe { + g(); + } +} diff --git a/tests/run-pass/catch.rs b/tests/run-pass/catch.rs new file mode 100644 index 0000000000000..439edc82dde25 --- /dev/null +++ b/tests/run-pass/catch.rs @@ -0,0 +1,9 @@ +use std::panic::{catch_unwind, AssertUnwindSafe}; + +fn main() { + let mut i = 3; + let _ = catch_unwind(AssertUnwindSafe(|| {i -= 2;} )); + for _ in 0..i { + println!("I"); + } +} diff --git a/tests/run-pass/char.rs b/tests/run-pass/char.rs new file mode 100644 index 0000000000000..505c09b0ad885 --- /dev/null +++ b/tests/run-pass/char.rs @@ -0,0 +1,9 @@ +fn main() { + let c = 'x'; + assert_eq!(c, 'x'); + assert!('a' < 'z'); + assert!('1' < '9'); + assert_eq!(std::char::from_u32('x' as u32).unwrap(), 'x'); + // FIXME: + // assert_eq!(std::char::from_u32('x' as u32), Some('x')); +} diff --git a/tests/run-pass/closure-drop.rs b/tests/run-pass/closure-drop.rs new file mode 100644 index 0000000000000..f1bdafaeb1354 --- /dev/null +++ b/tests/run-pass/closure-drop.rs @@ -0,0 +1,25 @@ +struct Foo<'a>(&'a mut bool); + +impl<'a> Drop for Foo<'a> { + fn drop(&mut self) { + *self.0 = true; + } +} + +fn f<T: FnOnce()>(t: T) { + t() +} + +fn main() { + let mut ran_drop = false; + { + let x = Foo(&mut ran_drop); + // this closure never by val uses its captures + // so it's basically a fn(&self) + // the shim used to not drop the `x` + let x = move || { let _ = x; }; + f(x); + } + assert!(ran_drop); +} + diff --git a/tests/run-pass/closures.rs b/tests/run-pass/closures.rs new file mode 100644 index 0000000000000..9b379051eb774 --- /dev/null +++ b/tests/run-pass/closures.rs @@ -0,0 +1,48 @@ +fn simple() -> i32 { + let y = 10; + let f = |x| x + y; + f(2) +} + +fn crazy_closure() -> (i32, i32, i32) { + fn inner<T: Copy>(t: T) -> (i32, T, T) { + struct NonCopy; + let x = NonCopy; + + let a = 2; + let b = 40; + let f = move |y, z, asdf| { + drop(x); + (a + b + y + z, asdf, t) + }; + f(a, b, t) + } + + inner(10) +} + +fn closure_arg_adjustment_problem() -> i64 { + fn once<F: FnOnce(i64)>(f: F) { f(2); } + let mut y = 1; + { + let f = |x| y += x; + once(f); + } + y +} + +fn fn_once_closure_with_multiple_args() -> i64 
{ + fn once<F: FnOnce(i64, i64) -> i64>(f: F) -> i64 { f(2, 3) } + let y = 1; + { + let f = |x, z| x + y + z; + once(f) + } +} + +fn main() { + assert_eq!(simple(), 12); + assert_eq!(crazy_closure(), (84, 10, 10)); + assert_eq!(closure_arg_adjustment_problem(), 3); + assert_eq!(fn_once_closure_with_multiple_args(), 6); +} diff --git a/tests/run-pass/const-vec-of-fns.rs b/tests/run-pass/const-vec-of-fns.rs new file mode 100644 index 0000000000000..0338a766e2627 --- /dev/null +++ b/tests/run-pass/const-vec-of-fns.rs @@ -0,0 +1,29 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// pretty-expanded FIXME #23616 + +/*! + * Try to double-check that static fns have the right size (with or + * without dummy env ptr, as appropriate) by iterating a size-2 array. + * If the static size differs from the runtime size, the second element + * should be read as a null or otherwise wrong pointer and crash. + */ + +fn f() { } +static mut CLOSURES: &'static mut [fn()] = &mut [f as fn(), f as fn()]; + +pub fn main() { + unsafe { + for closure in &mut *CLOSURES { + (*closure)() + } + } +} diff --git a/tests/run-pass/constants.rs b/tests/run-pass/constants.rs new file mode 100644 index 0000000000000..718c852601420 --- /dev/null +++ b/tests/run-pass/constants.rs @@ -0,0 +1,9 @@ +const A: usize = *&5; + +fn foo() -> usize { + A +} + +fn main() { + assert_eq!(foo(), A); +} diff --git a/tests/run-pass/deriving-associated-types.rs b/tests/run-pass/deriving-associated-types.rs new file mode 100644 index 0000000000000..b67ef85acf62d --- /dev/null +++ b/tests/run-pass/deriving-associated-types.rs @@ -0,0 +1,208 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub trait DeclaredTrait { + type Type; +} + +impl DeclaredTrait for i32 { + type Type = i32; +} + +pub trait WhereTrait { + type Type; +} + +impl WhereTrait for i32 { + type Type = i32; +} + +// Make sure we don't add a bound that just shares a name with an associated +// type.
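+// (In other words: module::Type below is a plain type alias that merely shares the name `Type`; the derived impls must treat it as the concrete i32 and not invent a bound for it.)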
+pub mod module { + pub type Type = i32; +} + +#[derive(PartialEq, Debug)] +struct PrivateStruct<T>(T); + +#[derive(PartialEq, Debug)] +struct TupleStruct<A, B: DeclaredTrait, C>( + module::Type, + Option<module::Type>, + A, + PrivateStruct<A>, + B, + B::Type, + Option<B::Type>, + <B as DeclaredTrait>::Type, + Option<<B as DeclaredTrait>::Type>, + C, + C::Type, + Option<C::Type>, + <C as WhereTrait>::Type, + Option<<C as WhereTrait>::Type>, + <i32 as DeclaredTrait>::Type, +) where C: WhereTrait; + +#[derive(PartialEq, Debug)] +pub struct Struct<A, B: DeclaredTrait, C> where C: WhereTrait { + m1: module::Type, + m2: Option<module::Type>, + a1: A, + a2: PrivateStruct<A>, + b: B, + b1: B::Type, + b2: Option<B::Type>, + b3: <B as DeclaredTrait>::Type, + b4: Option<<B as DeclaredTrait>::Type>, + c: C, + c1: C::Type, + c2: Option<C::Type>, + c3: <C as WhereTrait>::Type, + c4: Option<<C as WhereTrait>::Type>, + d: <i32 as DeclaredTrait>::Type, +} + +#[derive(PartialEq, Debug)] +enum Enum<A, B: DeclaredTrait, C> where C: WhereTrait { + Unit, + Seq( + module::Type, + Option<module::Type>, + A, + PrivateStruct<A>, + B, + B::Type, + Option<B::Type>, + <B as DeclaredTrait>::Type, + Option<<B as DeclaredTrait>::Type>, + C, + C::Type, + Option<C::Type>, + <C as WhereTrait>::Type, + Option<<C as WhereTrait>::Type>, + <i32 as DeclaredTrait>::Type, + ), + Map { + m1: module::Type, + m2: Option<module::Type>, + a1: A, + a2: PrivateStruct<A>, + b: B, + b1: B::Type, + b2: Option<B::Type>, + b3: <B as DeclaredTrait>::Type, + b4: Option<<B as DeclaredTrait>::Type>, + c: C, + c1: C::Type, + c2: Option<C::Type>, + c3: <C as WhereTrait>::Type, + c4: Option<<C as WhereTrait>::Type>, + d: <i32 as DeclaredTrait>::Type, + }, +} + +fn main() { + + let e: Enum< + i32, + i32, + i32, + > = Enum::Seq( + 0, + None, + 0, + PrivateStruct(0), + 0, + 0, + None, + 0, + None, + 0, + 0, + None, + 0, + None, + 0, + ); + assert_eq!(e, e); + + let e: Enum< + i32, + i32, + i32, + > = Enum::Map { + m1: 0, + m2: None, + a1: 0, + a2: PrivateStruct(0), + b: 0, + b1: 0, + b2: None, + b3: 0, + b4: None, + c: 0, + c1: 0, + c2: None, + c3: 0, + c4: None, + d: 0, + }; + assert_eq!(e, e); + let e: TupleStruct< + i32, + i32, + i32, + > = TupleStruct( + 0, + None, + 0, + PrivateStruct(0), + 0, + 0, + None, + 0, + None, + 0, + 0, + None, + 0, + None, + 0, + ); + assert_eq!(e, e); + + let e: Struct< + i32, + i32, + i32, + > = Struct { + m1: 0, + m2: None, + a1: 0, + a2: PrivateStruct(0), + b: 0, + b1: 0, + b2: None, + b3: 0, + b4: None, + c: 0, + c1: 0, + c2: None, + c3: 0, + c4: None, + d: 0, + }; + assert_eq!(e, e); + + let e = Enum::Unit::<i32, i32, i32>; + assert_eq!(e, e); +} diff --git a/tests/run-pass/drop_empty_slice.rs b/tests/run-pass/drop_empty_slice.rs new file mode 100644 index 0000000000000..b21c8a612c57b --- /dev/null +++ b/tests/run-pass/drop_empty_slice.rs @@ -0,0 +1,7 @@ +#![feature(box_syntax)] + +fn main() { + // With the nested Vec, this is calling Offset(Unique::empty(), 0) on drop. + let args : Vec<Vec<i32>> = Vec::new(); + let _ = box args; +} diff --git a/tests/run-pass/dst-field-align.rs b/tests/run-pass/dst-field-align.rs new file mode 100644 index 0000000000000..5631b65ed9d8a --- /dev/null +++ b/tests/run-pass/dst-field-align.rs @@ -0,0 +1,77 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
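+// (The recurring pattern in this file: a u16 field followed by a possibly-unsized tail means the tail's offset depends on the tail's alignment, so &f.b must be derived from the dynamic alignment carried by the pointer, not from a static offset; the ptr1 == ptr2 check below pins that down.)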
+ +#![allow(dead_code)] + +struct Foo<T: ?Sized> { + a: u16, + b: T +} + +trait Bar { + fn get(&self) -> usize; +} + +impl Bar for usize { + fn get(&self) -> usize { *self } +} + +struct Baz<T: ?Sized> { + a: T +} + +struct HasDrop<T: ?Sized> { + ptr: Box<usize>, + data: T +} + +fn main() { + // Test that zero-offset works properly + let b : Baz<usize> = Baz { a: 7 }; + assert_eq!(b.a.get(), 7); + let b : &Baz<Bar> = &b; + assert_eq!(b.a.get(), 7); + + // Test that the field is aligned properly + let f : Foo<usize> = Foo { a: 0, b: 11 }; + assert_eq!(f.b.get(), 11); + let ptr1 : *const u8 = &f.b as *const _ as *const u8; + + let f : &Foo<Bar> = &f; + let ptr2 : *const u8 = &f.b as *const _ as *const u8; + assert_eq!(f.b.get(), 11); + + // The pointers should be the same + assert_eq!(ptr1, ptr2); + + // Test that nested DSTs work properly + let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }}; + assert_eq!(f.b.b.get(), 17); + let f : &Foo<Foo<Bar>> = &f; + assert_eq!(f.b.b.get(), 17); + + // Test that get the pointer via destructuring works + + let f : Foo<usize> = Foo { a: 0, b: 11 }; + let f : &Foo<Bar> = &f; + let &Foo { a: _, b: ref bar } = f; + assert_eq!(bar.get(), 11); + + // Make sure that drop flags don't screw things up + + let d : HasDrop<Baz<[i32; 4]>> = HasDrop { + ptr: Box::new(0), + data: Baz { a: [1,2,3,4] } + }; + assert_eq!([1,2,3,4], d.data.a); + + let d : &HasDrop<Baz<[i32]>> = &d; + assert_eq!(&[1,2,3,4], &d.data.a); +} diff --git a/tests/run-pass/dst-irrefutable-bind.rs b/tests/run-pass/dst-irrefutable-bind.rs new file mode 100644 index 0000000000000..9f8067f372aef --- /dev/null +++ b/tests/run-pass/dst-irrefutable-bind.rs @@ -0,0 +1,24 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +struct Test<T: ?Sized>(T); + +fn main() { + let x = Test([1,2,3]); + let x : &Test<[i32]> = &x; + + let & ref _y = x; + + // Make sure binding to a fat pointer behind a reference + // still works + let slice = &[1,2,3]; + let x = Test(&slice); + let Test(&_slice) = x; +} diff --git a/tests/run-pass/dst-raw.rs b/tests/run-pass/dst-raw.rs new file mode 100644 index 0000000000000..3a74626b0299f --- /dev/null +++ b/tests/run-pass/dst-raw.rs @@ -0,0 +1,113 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+
+// Test DST raw pointers
+
+
+trait Trait {
+    fn foo(&self) -> isize;
+}
+
+struct A {
+    f: isize
+}
+impl Trait for A {
+    fn foo(&self) -> isize {
+        self.f
+    }
+}
+
+struct Foo<T: ?Sized> {
+    f: T
+}
+
+pub fn main() {
+    // raw trait object
+    let x = A { f: 42 };
+    let z: *const Trait = &x;
+    let r = unsafe {
+        (&*z).foo()
+    };
+    assert_eq!(r, 42);
+
+    // raw DST struct
+    let p = Foo {f: A { f: 42 }};
+    let o: *const Foo<Trait> = &p;
+    let r = unsafe {
+        (&*o).f.foo()
+    };
+    assert_eq!(r, 42);
+
+    // raw slice
+    let a: *const [_] = &[1, 2, 3];
+    unsafe {
+        let b = (*a)[2];
+        assert_eq!(b, 3);
+        let len = (*a).len();
+        assert_eq!(len, 3);
+    }
+
+    // raw slice with explicit cast
+    let a = &[1, 2, 3] as *const [i32];
+    unsafe {
+        let b = (*a)[2];
+        assert_eq!(b, 3);
+        let len = (*a).len();
+        assert_eq!(len, 3);
+    }
+
+    // raw DST struct with slice
+    let c: *const Foo<[_]> = &Foo {f: [1, 2, 3]};
+    unsafe {
+        let b = (&*c).f[0];
+        assert_eq!(b, 1);
+        let len = (&*c).f.len();
+        assert_eq!(len, 3);
+    }
+
+    // all of the above with *mut
+    let mut x = A { f: 42 };
+    let z: *mut Trait = &mut x;
+    let r = unsafe {
+        (&*z).foo()
+    };
+    assert_eq!(r, 42);
+
+    let mut p = Foo {f: A { f: 42 }};
+    let o: *mut Foo<Trait> = &mut p;
+    let r = unsafe {
+        (&*o).f.foo()
+    };
+    assert_eq!(r, 42);
+
+    let a: *mut [_] = &mut [1, 2, 3];
+    unsafe {
+        let b = (*a)[2];
+        assert_eq!(b, 3);
+        let len = (*a).len();
+        assert_eq!(len, 3);
+    }
+
+    let a = &mut [1, 2, 3] as *mut [i32];
+    unsafe {
+        let b = (*a)[2];
+        assert_eq!(b, 3);
+        let len = (*a).len();
+        assert_eq!(len, 3);
+    }
+
+    let c: *mut Foo<[_]> = &mut Foo {f: [1, 2, 3]};
+    unsafe {
+        let b = (&*c).f[0];
+        assert_eq!(b, 1);
+        let len = (&*c).f.len();
+        assert_eq!(len, 3);
+    }
+}
diff --git a/tests/run-pass/dst-struct-sole.rs b/tests/run-pass/dst-struct-sole.rs
new file mode 100644
index 0000000000000..58d7b35a5275c
--- /dev/null
+++ b/tests/run-pass/dst-struct-sole.rs
@@ -0,0 +1,85 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// As dst-struct.rs, but the unsized field is the only field in the struct.
+
+
+struct Fat<T: ?Sized> {
+    ptr: T
+}
+
+// x is a fat pointer
+fn foo(x: &Fat<[isize]>) {
+    let y = &x.ptr;
+    assert_eq!(x.ptr.len(), 3);
+    assert_eq!(y[0], 1);
+    assert_eq!(x.ptr[1], 2);
+}
+
+fn foo2<T: ToBar>(x: &Fat<[T]>) {
+    let y = &x.ptr;
+    let bar = Bar;
+    assert_eq!(x.ptr.len(), 3);
+    assert_eq!(y[0].to_bar(), bar);
+    assert_eq!(x.ptr[1].to_bar(), bar);
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+struct Bar;
+
+trait ToBar {
+    fn to_bar(&self) -> Bar;
+}
+
+impl ToBar for Bar {
+    fn to_bar(&self) -> Bar {
+        *self
+    }
+}
+
+pub fn main() {
+    // With a vec of ints.
+    let f1 = Fat { ptr: [1, 2, 3] };
+    foo(&f1);
+    let f2 = &f1;
+    foo(f2);
+    let f3: &Fat<[isize]> = f2;
+    foo(f3);
+    let f4: &Fat<[isize]> = &f1;
+    foo(f4);
+    let f5: &Fat<[isize]> = &Fat { ptr: [1, 2, 3] };
+    foo(f5);
+
+    // With a vec of Bars.
+    let bar = Bar;
+    let f1 = Fat { ptr: [bar, bar, bar] };
+    foo2(&f1);
+    let f2 = &f1;
+    foo2(f2);
+    let f3: &Fat<[Bar]> = f2;
+    foo2(f3);
+    let f4: &Fat<[Bar]> = &f1;
+    foo2(f4);
+    let f5: &Fat<[Bar]> = &Fat { ptr: [bar, bar, bar] };
+    foo2(f5);
+
+    // Assignment.
+    let f5: &mut Fat<[isize]> = &mut Fat { ptr: [1, 2, 3] };
+    f5.ptr[1] = 34;
+    assert_eq!(f5.ptr[0], 1);
+    assert_eq!(f5.ptr[1], 34);
+    assert_eq!(f5.ptr[2], 3);
+
+    // Zero size vec.
+    let f5: &Fat<[isize]> = &Fat { ptr: [] };
+    assert!(f5.ptr.is_empty());
+    let f5: &Fat<[Bar]> = &Fat { ptr: [] };
+    assert!(f5.ptr.is_empty());
+}
diff --git a/tests/run-pass/dst-struct.rs b/tests/run-pass/dst-struct.rs
new file mode 100644
index 0000000000000..932b571eccdbb
--- /dev/null
+++ b/tests/run-pass/dst-struct.rs
@@ -0,0 +1,134 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+#![allow(unused_features)]
+#![feature(box_syntax)]
+
+struct Fat<T: ?Sized> {
+    f1: isize,
+    f2: &'static str,
+    ptr: T
+}
+
+// x is a fat pointer
+fn foo(x: &Fat<[isize]>) {
+    let y = &x.ptr;
+    assert_eq!(x.ptr.len(), 3);
+    assert_eq!(y[0], 1);
+    assert_eq!(x.ptr[1], 2);
+    assert_eq!(x.f1, 5);
+    assert_eq!(x.f2, "some str");
+}
+
+fn foo2<T: ToBar>(x: &Fat<[T]>) {
+    let y = &x.ptr;
+    let bar = Bar;
+    assert_eq!(x.ptr.len(), 3);
+    assert_eq!(y[0].to_bar(), bar);
+    assert_eq!(x.ptr[1].to_bar(), bar);
+    assert_eq!(x.f1, 5);
+    assert_eq!(x.f2, "some str");
+}
+
+fn foo3(x: &Fat<Fat<[isize]>>) {
+    let y = &x.ptr.ptr;
+    assert_eq!(x.f1, 5);
+    assert_eq!(x.f2, "some str");
+    assert_eq!(x.ptr.f1, 8);
+    assert_eq!(x.ptr.f2, "deep str");
+    assert_eq!(x.ptr.ptr.len(), 3);
+    assert_eq!(y[0], 1);
+    assert_eq!(x.ptr.ptr[1], 2);
+}
+
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+struct Bar;
+
+trait ToBar {
+    fn to_bar(&self) -> Bar;
+}
+
+impl ToBar for Bar {
+    fn to_bar(&self) -> Bar {
+        *self
+    }
+}
+
+pub fn main() {
+    // With a vec of ints.
+    let f1 = Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] };
+    foo(&f1);
+    let f2 = &f1;
+    foo(f2);
+    let f3: &Fat<[isize]> = f2;
+    foo(f3);
+    let f4: &Fat<[isize]> = &f1;
+    foo(f4);
+    let f5: &Fat<[isize]> = &Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] };
+    foo(f5);
+
+    // With a vec of Bars.
+    let bar = Bar;
+    let f1 = Fat { f1: 5, f2: "some str", ptr: [bar, bar, bar] };
+    foo2(&f1);
+    let f2 = &f1;
+    foo2(f2);
+    let f3: &Fat<[Bar]> = f2;
+    foo2(f3);
+    let f4: &Fat<[Bar]> = &f1;
+    foo2(f4);
+    let f5: &Fat<[Bar]> = &Fat { f1: 5, f2: "some str", ptr: [bar, bar, bar] };
+    foo2(f5);
+
+    // Assignment.
+    let f5: &mut Fat<[isize]> = &mut Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] };
+    f5.ptr[1] = 34;
+    assert_eq!(f5.ptr[0], 1);
+    assert_eq!(f5.ptr[1], 34);
+    assert_eq!(f5.ptr[2], 3);
+
+    // Zero size vec.
+    let f5: &Fat<[isize]> = &Fat { f1: 5, f2: "some str", ptr: [] };
+    assert!(f5.ptr.is_empty());
+    let f5: &Fat<[Bar]> = &Fat { f1: 5, f2: "some str", ptr: [] };
+    assert!(f5.ptr.is_empty());
+
+    // Deeply nested.
+    let f1 = Fat { f1: 5, f2: "some str", ptr: Fat { f1: 8, f2: "deep str", ptr: [1, 2, 3]} };
+    foo3(&f1);
+    let f2 = &f1;
+    foo3(f2);
+    let f3: &Fat<Fat<[isize]>> = f2;
+    foo3(f3);
+    let f4: &Fat<Fat<[isize]>> = &f1;
+    foo3(f4);
+    let f5: &Fat<Fat<[isize]>> =
+        &Fat { f1: 5, f2: "some str", ptr: Fat { f1: 8, f2: "deep str", ptr: [1, 2, 3]} };
+    foo3(f5);
+
+    // Box.
+    let f1 = Box::new([1, 2, 3]);
+    assert_eq!((*f1)[1], 2);
+    let f2: Box<[isize]> = f1;
+    assert_eq!((*f2)[1], 2);
+
+    // Nested Box.
+    let f1 : Box<Fat<[isize]>> = box Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] };
+    foo(&*f1);
+    let f2 : Box<Fat<[isize]>> = f1;
+    foo(&*f2);
+
+    // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
+    let f3 : Box<Fat<[isize]>> =
+        Box::<Fat<[isize; 3]>>::new(Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] });
+    foo(&*f3);
+}
diff --git a/tests/run-pass/enum-nullable-const-null-with-fields.rs b/tests/run-pass/enum-nullable-const-null-with-fields.rs
new file mode 100644
index 0000000000000..1342c4e104de5
--- /dev/null
+++ b/tests/run-pass/enum-nullable-const-null-with-fields.rs
@@ -0,0 +1,22 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+use std::result::Result;
+use std::result::Result::Ok;
+
+static C: Result<(), Box<isize>> = Ok(());
+
+// This is because of yet another bad assertion (ICE) about the null side of a nullable enum.
+// So we won't actually compile if the bug is present, but we check the value in main anyway.
+
+pub fn main() {
+    assert!(C.is_ok());
+}
diff --git a/tests/run-pass/enums.rs b/tests/run-pass/enums.rs
new file mode 100644
index 0000000000000..1f27292904f42
--- /dev/null
+++ b/tests/run-pass/enums.rs
@@ -0,0 +1,34 @@
+enum MyEnum {
+    MyEmptyVariant,
+    MyNewtypeVariant(i32),
+    MyTupleVariant(i32, i32),
+    MyStructVariant {
+        my_first_field: i32,
+        my_second_field: i32,
+    }
+}
+
+fn test(me: MyEnum) {
+    match me {
+        MyEnum::MyEmptyVariant => {},
+        MyEnum::MyNewtypeVariant(ref val) => assert_eq!(val, &42),
+        MyEnum::MyTupleVariant(ref a, ref b) => {
+            assert_eq!(a, &43);
+            assert_eq!(b, &44);
+        },
+        MyEnum::MyStructVariant { ref my_first_field, ref my_second_field } => {
+            assert_eq!(my_first_field, &45);
+            assert_eq!(my_second_field, &46);
+        },
+    }
+}
+
+fn main() {
+    test(MyEnum::MyEmptyVariant);
+    test(MyEnum::MyNewtypeVariant(42));
+    test(MyEnum::MyTupleVariant(43, 44));
+    test(MyEnum::MyStructVariant{
+        my_first_field: 45,
+        my_second_field: 46,
+    });
+}
diff --git a/tests/run-pass/float_fast_math.rs b/tests/run-pass/float_fast_math.rs
new file mode 100644
index 0000000000000..c1b4b55bd3723
--- /dev/null
+++ b/tests/run-pass/float_fast_math.rs
@@ -0,0 +1,33 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
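+
+// The *_fast intrinsics are fast-math variants of the float operators; on
+// finite inputs like the ones below they must match the ordinary operators.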
+
+#![feature(core_intrinsics)]
+
+use std::intrinsics::{fadd_fast, fsub_fast, fmul_fast, fdiv_fast, frem_fast};
+
+#[inline(never)]
+pub fn test_operations(a: f64, b: f64) {
+    // make sure they all map to the correct operation
+    unsafe {
+        assert_eq!(fadd_fast(a, b), a + b);
+        assert_eq!(fsub_fast(a, b), a - b);
+        assert_eq!(fmul_fast(a, b), a * b);
+        assert_eq!(fdiv_fast(a, b), a / b);
+        assert_eq!(frem_fast(a, b), a % b);
+    }
+}
+
+fn main() {
+    test_operations(1., 2.);
+    test_operations(10., 5.);
+}
diff --git a/tests/run-pass/floats.rs b/tests/run-pass/floats.rs
new file mode 100644
index 0000000000000..9c4d0594d1c99
--- /dev/null
+++ b/tests/run-pass/floats.rs
@@ -0,0 +1,11 @@
+
+fn main() {
+    assert_eq!(6.0_f32*6.0_f32, 36.0_f32);
+    assert_eq!(6.0_f64*6.0_f64, 36.0_f64);
+    assert_eq!(-{5.0_f32}, -5.0_f32);
+    assert!((5.0_f32/0.0).is_infinite());
+    assert!((-5.0_f32).sqrt().is_nan());
+    let x: u64 = unsafe { std::mem::transmute(42.0_f64) };
+    let y: f64 = unsafe { std::mem::transmute(x) };
+    assert_eq!(y, 42.0_f64);
+}
diff --git a/tests/run-pass/fn_item_as_closure_trait_object.rs b/tests/run-pass/fn_item_as_closure_trait_object.rs
new file mode 100644
index 0000000000000..799f97a4f6fde
--- /dev/null
+++ b/tests/run-pass/fn_item_as_closure_trait_object.rs
@@ -0,0 +1,6 @@
+fn foo() {}
+
+fn main() {
+    let f: &Fn() = &foo;
+    f();
+}
diff --git a/tests/run-pass/fn_item_with_args_as_closure_trait_object.rs b/tests/run-pass/fn_item_with_args_as_closure_trait_object.rs
new file mode 100644
index 0000000000000..79ece75c773bb
--- /dev/null
+++ b/tests/run-pass/fn_item_with_args_as_closure_trait_object.rs
@@ -0,0 +1,8 @@
+fn foo(i: i32) {
+    assert_eq!(i, 42);
+}
+
+fn main() {
+    let f: &Fn(i32) = &foo;
+    f(42);
+}
diff --git a/tests/run-pass/fn_item_with_multiple_args_as_closure_trait_object.rs b/tests/run-pass/fn_item_with_multiple_args_as_closure_trait_object.rs
new file mode 100644
index 0000000000000..f4b5b449aa587
--- /dev/null
+++ b/tests/run-pass/fn_item_with_multiple_args_as_closure_trait_object.rs
@@ -0,0 +1,18 @@
+fn foo(i: i32, j: i32) {
+    assert_eq!(i, 42);
+    assert_eq!(j, 55);
+}
+
+fn bar(i: i32, j: i32, k: f32) {
+    assert_eq!(i, 42);
+    assert_eq!(j, 55);
+    assert_eq!(k, 3.14159)
+}
+
+
+fn main() {
+    let f: &Fn(i32, i32) = &foo;
+    f(42, 55);
+    let f: &Fn(i32, i32, f32) = &bar;
+    f(42, 55, 3.14159);
+}
diff --git a/tests/run-pass/fn_ptr_as_closure_trait_object.rs b/tests/run-pass/fn_ptr_as_closure_trait_object.rs
new file mode 100644
index 0000000000000..24ae1f35bb60b
--- /dev/null
+++ b/tests/run-pass/fn_ptr_as_closure_trait_object.rs
@@ -0,0 +1,15 @@
+fn foo() {}
+fn bar(u: u32) { assert_eq!(u, 42); }
+fn baa(u: u32, f: f32) {
+    assert_eq!(u, 42);
+    assert_eq!(f, 3.141);
+}
+
+fn main() {
+    let f: &Fn() = &(foo as fn());
+    f();
+    let f: &Fn(u32) = &(bar as fn(u32));
+    f(42);
+    let f: &Fn(u32, f32) = &(baa as fn(u32, f32));
+    f(42, 3.141);
+}
diff --git a/tests/run-pass/format.rs b/tests/run-pass/format.rs
new file mode 100644
index 0000000000000..78729b915613a
--- /dev/null
+++ b/tests/run-pass/format.rs
@@ -0,0 +1,3 @@
+fn main() {
+    println!("Hello {}", 13);
+}
diff --git a/tests/run-pass/function_pointers.rs b/tests/run-pass/function_pointers.rs
new file mode 100644
index 0000000000000..4f597d4a2e94f
--- /dev/null
+++ b/tests/run-pass/function_pointers.rs
@@ -0,0 +1,49 @@
+fn f() -> i32 {
+    42
+}
+
+fn g(i: i32) -> i32 {
+    i*42
+}
+
+fn h(i: i32, j: i32) -> i32 {
+    j * i * 7
+}
+
+fn return_fn_ptr() -> fn() -> i32 {
+    f
+}
+
+fn call_fn_ptr() -> i32 {
+    return_fn_ptr()()
+}
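+
+// The helpers below take the function by Fn, FnMut and FnOnce bounds at each
+// arity, so function items get called through all three closure traits.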
+
+fn indirect<F: Fn() -> i32>(f: F) -> i32 { f() }
+fn indirect_mut<F: FnMut() -> i32>(mut f: F) -> i32 { f() }
+fn indirect_once<F: FnOnce() -> i32>(f: F) -> i32 { f() }
+
+fn indirect2<F: Fn(i32) -> i32>(f: F) -> i32 { f(10) }
+fn indirect_mut2<F: FnMut(i32) -> i32>(mut f: F) -> i32 { f(10) }
+fn indirect_once2<F: FnOnce(i32) -> i32>(f: F) -> i32 { f(10) }
+
+fn indirect3<F: Fn(i32, i32) -> i32>(f: F) -> i32 { f(10, 3) }
+fn indirect_mut3<F: FnMut(i32, i32) -> i32>(mut f: F) -> i32 { f(10, 3) }
+fn indirect_once3<F: FnOnce(i32, i32) -> i32>(f: F) -> i32 { f(10, 3) }
+
+fn main() {
+    assert_eq!(call_fn_ptr(), 42);
+    assert_eq!(indirect(f), 42);
+    assert_eq!(indirect_mut(f), 42);
+    assert_eq!(indirect_once(f), 42);
+    assert_eq!(indirect2(g), 420);
+    assert_eq!(indirect_mut2(g), 420);
+    assert_eq!(indirect_once2(g), 420);
+    assert_eq!(indirect3(h), 210);
+    assert_eq!(indirect_mut3(h), 210);
+    assert_eq!(indirect_once3(h), 210);
+    assert!(return_fn_ptr() == f);
+    assert!(return_fn_ptr() as unsafe fn() -> i32 == f as fn() -> i32 as unsafe fn() -> i32);
+}
diff --git a/tests/run-pass/hello.rs b/tests/run-pass/hello.rs
new file mode 100644
index 0000000000000..e7a11a969c037
--- /dev/null
+++ b/tests/run-pass/hello.rs
@@ -0,0 +1,3 @@
+fn main() {
+    println!("Hello, world!");
+}
diff --git a/tests/run-pass/intrinsics-integer.rs b/tests/run-pass/intrinsics-integer.rs
new file mode 100644
index 0000000000000..4896f02da20b0
--- /dev/null
+++ b/tests/run-pass/intrinsics-integer.rs
@@ -0,0 +1,142 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(intrinsics)]
+
+mod rusti {
+    extern "rust-intrinsic" {
+        pub fn ctpop<T>(x: T) -> T;
+        pub fn ctlz<T>(x: T) -> T;
+        pub fn ctlz_nonzero<T>(x: T) -> T;
+        pub fn cttz<T>(x: T) -> T;
+        pub fn cttz_nonzero<T>(x: T) -> T;
+        pub fn bswap<T>(x: T) -> T;
+    }
+}
+
+pub fn main() {
+    unsafe {
+        use rusti::*;
+
+        assert_eq!(ctpop(0u8), 0); assert_eq!(ctpop(0i8), 0);
+        assert_eq!(ctpop(0u16), 0); assert_eq!(ctpop(0i16), 0);
+        assert_eq!(ctpop(0u32), 0); assert_eq!(ctpop(0i32), 0);
+        assert_eq!(ctpop(0u64), 0); assert_eq!(ctpop(0i64), 0);
+
+        assert_eq!(ctpop(1u8), 1); assert_eq!(ctpop(1i8), 1);
+        assert_eq!(ctpop(1u16), 1); assert_eq!(ctpop(1i16), 1);
+        assert_eq!(ctpop(1u32), 1); assert_eq!(ctpop(1i32), 1);
+        assert_eq!(ctpop(1u64), 1); assert_eq!(ctpop(1i64), 1);
+
+        assert_eq!(ctpop(10u8), 2); assert_eq!(ctpop(10i8), 2);
+        assert_eq!(ctpop(10u16), 2); assert_eq!(ctpop(10i16), 2);
+        assert_eq!(ctpop(10u32), 2); assert_eq!(ctpop(10i32), 2);
+        assert_eq!(ctpop(10u64), 2); assert_eq!(ctpop(10i64), 2);
+
+        assert_eq!(ctpop(100u8), 3); assert_eq!(ctpop(100i8), 3);
+        assert_eq!(ctpop(100u16), 3); assert_eq!(ctpop(100i16), 3);
+        assert_eq!(ctpop(100u32), 3); assert_eq!(ctpop(100i32), 3);
+        assert_eq!(ctpop(100u64), 3); assert_eq!(ctpop(100i64), 3);
+
+        assert_eq!(ctpop(-1i8 as u8), 8); assert_eq!(ctpop(-1i8), 8);
+        assert_eq!(ctpop(-1i16 as u16), 16); assert_eq!(ctpop(-1i16), 16);
+        assert_eq!(ctpop(-1i32 as u32), 32); assert_eq!(ctpop(-1i32), 32);
+        assert_eq!(ctpop(-1i64 as u64), 64); assert_eq!(ctpop(-1i64), 64);
+
+        assert_eq!(ctlz(0u8), 8); assert_eq!(ctlz(0i8), 8);
+        assert_eq!(ctlz(0u16), 16); assert_eq!(ctlz(0i16), 16);
+        assert_eq!(ctlz(0u32), 32); assert_eq!(ctlz(0i32), 32);
+        assert_eq!(ctlz(0u64), 64); assert_eq!(ctlz(0i64), 64);
+
+        assert_eq!(ctlz(1u8), 7); assert_eq!(ctlz(1i8), 7);
+        assert_eq!(ctlz(1u16), 15); assert_eq!(ctlz(1i16), 15);
+        assert_eq!(ctlz(1u32), 31); assert_eq!(ctlz(1i32), 31);
+        assert_eq!(ctlz(1u64), 63); assert_eq!(ctlz(1i64), 63);
+
+        assert_eq!(ctlz(10u8), 4); assert_eq!(ctlz(10i8), 4);
+        assert_eq!(ctlz(10u16), 12); assert_eq!(ctlz(10i16), 12);
+        assert_eq!(ctlz(10u32), 28); assert_eq!(ctlz(10i32), 28);
+        assert_eq!(ctlz(10u64), 60); assert_eq!(ctlz(10i64), 60);
+
+        assert_eq!(ctlz(100u8), 1); assert_eq!(ctlz(100i8), 1);
+        assert_eq!(ctlz(100u16), 9); assert_eq!(ctlz(100i16), 9);
+        assert_eq!(ctlz(100u32), 25); assert_eq!(ctlz(100i32), 25);
+        assert_eq!(ctlz(100u64), 57); assert_eq!(ctlz(100i64), 57);
+
+        assert_eq!(ctlz_nonzero(1u8), 7); assert_eq!(ctlz_nonzero(1i8), 7);
+        assert_eq!(ctlz_nonzero(1u16), 15); assert_eq!(ctlz_nonzero(1i16), 15);
+        assert_eq!(ctlz_nonzero(1u32), 31); assert_eq!(ctlz_nonzero(1i32), 31);
+        assert_eq!(ctlz_nonzero(1u64), 63); assert_eq!(ctlz_nonzero(1i64), 63);
+
+        assert_eq!(ctlz_nonzero(10u8), 4); assert_eq!(ctlz_nonzero(10i8), 4);
+        assert_eq!(ctlz_nonzero(10u16), 12); assert_eq!(ctlz_nonzero(10i16), 12);
+        assert_eq!(ctlz_nonzero(10u32), 28); assert_eq!(ctlz_nonzero(10i32), 28);
+        assert_eq!(ctlz_nonzero(10u64), 60); assert_eq!(ctlz_nonzero(10i64), 60);
+
+        assert_eq!(ctlz_nonzero(100u8), 1); assert_eq!(ctlz_nonzero(100i8), 1);
+        assert_eq!(ctlz_nonzero(100u16), 9); assert_eq!(ctlz_nonzero(100i16), 9);
+        assert_eq!(ctlz_nonzero(100u32), 25); assert_eq!(ctlz_nonzero(100i32), 25);
+        assert_eq!(ctlz_nonzero(100u64), 57); assert_eq!(ctlz_nonzero(100i64), 57);
+
+        assert_eq!(cttz(-1i8 as u8), 0); assert_eq!(cttz(-1i8), 0);
+        assert_eq!(cttz(-1i16 as u16), 0); assert_eq!(cttz(-1i16), 0);
+        assert_eq!(cttz(-1i32 as u32), 0); assert_eq!(cttz(-1i32), 0);
+        assert_eq!(cttz(-1i64 as u64), 0); assert_eq!(cttz(-1i64), 0);
+
+        assert_eq!(cttz(0u8), 8); assert_eq!(cttz(0i8), 8);
+        assert_eq!(cttz(0u16), 16); assert_eq!(cttz(0i16), 16);
+        assert_eq!(cttz(0u32), 32); assert_eq!(cttz(0i32), 32);
+        assert_eq!(cttz(0u64), 64); assert_eq!(cttz(0i64), 64);
+
+        assert_eq!(cttz(1u8), 0); assert_eq!(cttz(1i8), 0);
+        assert_eq!(cttz(1u16), 0); assert_eq!(cttz(1i16), 0);
+        assert_eq!(cttz(1u32), 0); assert_eq!(cttz(1i32), 0);
+        assert_eq!(cttz(1u64), 0); assert_eq!(cttz(1i64), 0);
+
+        assert_eq!(cttz(10u8), 1); assert_eq!(cttz(10i8), 1);
+        assert_eq!(cttz(10u16), 1); assert_eq!(cttz(10i16), 1);
+        assert_eq!(cttz(10u32), 1); assert_eq!(cttz(10i32), 1);
+        assert_eq!(cttz(10u64), 1); assert_eq!(cttz(10i64), 1);
+
+        assert_eq!(cttz(100u8), 2); assert_eq!(cttz(100i8), 2);
+        assert_eq!(cttz(100u16), 2); assert_eq!(cttz(100i16), 2);
+        assert_eq!(cttz(100u32), 2); assert_eq!(cttz(100i32), 2);
+        assert_eq!(cttz(100u64), 2); assert_eq!(cttz(100i64), 2);
+
+        assert_eq!(cttz_nonzero(-1i8 as u8), 0); assert_eq!(cttz_nonzero(-1i8), 0);
+        assert_eq!(cttz_nonzero(-1i16 as u16), 0); assert_eq!(cttz_nonzero(-1i16), 0);
+        assert_eq!(cttz_nonzero(-1i32 as u32), 0); assert_eq!(cttz_nonzero(-1i32), 0);
+        assert_eq!(cttz_nonzero(-1i64 as u64), 0); assert_eq!(cttz_nonzero(-1i64), 0);
+
+        assert_eq!(cttz_nonzero(1u8), 0); assert_eq!(cttz_nonzero(1i8), 0);
+        assert_eq!(cttz_nonzero(1u16), 0); assert_eq!(cttz_nonzero(1i16), 0);
+        assert_eq!(cttz_nonzero(1u32), 0); assert_eq!(cttz_nonzero(1i32), 0);
+        assert_eq!(cttz_nonzero(1u64), 0); assert_eq!(cttz_nonzero(1i64), 0);
+
+        assert_eq!(cttz_nonzero(10u8), 1); assert_eq!(cttz_nonzero(10i8), 1);
+        assert_eq!(cttz_nonzero(10u16), 1); assert_eq!(cttz_nonzero(10i16), 1);
+        assert_eq!(cttz_nonzero(10u32), 1); assert_eq!(cttz_nonzero(10i32), 1);
+        assert_eq!(cttz_nonzero(10u64), 1); assert_eq!(cttz_nonzero(10i64), 1);
+
+        assert_eq!(cttz_nonzero(100u8), 2); assert_eq!(cttz_nonzero(100i8), 2);
+        assert_eq!(cttz_nonzero(100u16), 2); assert_eq!(cttz_nonzero(100i16), 2);
+        assert_eq!(cttz_nonzero(100u32), 2); assert_eq!(cttz_nonzero(100i32), 2);
+        assert_eq!(cttz_nonzero(100u64), 2); assert_eq!(cttz_nonzero(100i64), 2);
+
+        assert_eq!(bswap(0x0Au8), 0x0A); // no-op
+        assert_eq!(bswap(0x0Ai8), 0x0A); // no-op
+        assert_eq!(bswap(0x0A0Bu16), 0x0B0A);
+        assert_eq!(bswap(0x0A0Bi16), 0x0B0A);
+        assert_eq!(bswap(0x0ABBCC0Du32), 0x0DCCBB0A);
+        assert_eq!(bswap(0x0ABBCC0Di32), 0x0DCCBB0A);
+        assert_eq!(bswap(0x0122334455667708u64), 0x0877665544332201);
+        assert_eq!(bswap(0x0122334455667708i64), 0x0877665544332201);
+    }
+}
diff --git a/tests/run-pass/intrinsics-math.rs b/tests/run-pass/intrinsics-math.rs
new file mode 100644
index 0000000000000..a2c55634749cb
--- /dev/null
+++ b/tests/run-pass/intrinsics-math.rs
@@ -0,0 +1,67 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! assert_approx_eq {
+    ($a:expr, $b:expr) => ({
+        let (a, b) = (&$a, &$b);
+        assert!((*a - *b).abs() < 1.0e-6,
+                "{} is not approximately equal to {}", *a, *b);
+    })
+}
+
+pub fn main() {
+    use std::f32;
+    use std::f64;
+
+    assert_approx_eq!(64f32.sqrt(), 8f32);
+    assert_approx_eq!(64f64.sqrt(), 8f64);
+
+    assert_approx_eq!(25f32.powi(-2), 0.0016f32);
+    assert_approx_eq!(23.2f64.powi(2), 538.24f64);
+
+    assert_approx_eq!(0f32.sin(), 0f32);
+    assert_approx_eq!((f64::consts::PI / 2f64).sin(), 1f64);
+
+    assert_approx_eq!(0f32.cos(), 1f32);
+    assert_approx_eq!((f64::consts::PI * 2f64).cos(), 1f64);
+
+    assert_approx_eq!(25f32.powf(-2f32), 0.0016f32);
+    assert_approx_eq!(400f64.powf(0.5f64), 20f64);
+
+    assert_approx_eq!((1f32.exp() - f32::consts::E).abs(), 0f32);
+    assert_approx_eq!(1f64.exp(), f64::consts::E);
+
+    assert_approx_eq!(10f32.exp2(), 1024f32);
+    assert_approx_eq!(50f64.exp2(), 1125899906842624f64);
+
+    assert_approx_eq!((f32::consts::E.ln() - 1f32).abs(), 0f32);
+    assert_approx_eq!(1f64.ln(), 0f64);
+
+    assert_approx_eq!(10f32.log10(), 1f32);
+    assert_approx_eq!(f64::consts::E.log10(), f64::consts::LOG10_E);
+
+    assert_approx_eq!(8f32.log2(), 3f32);
+    assert_approx_eq!(f64::consts::E.log2(), f64::consts::LOG2_E);
+
+    assert_approx_eq!(1.0f32.mul_add(2.0f32, 5.0f32), 7.0f32);
+    assert_approx_eq!(0.0f64.mul_add(-2.0f64, f64::consts::E), f64::consts::E);
+
+    assert_approx_eq!((-1.0f32).abs(), 1.0f32);
+    assert_approx_eq!(34.2f64.abs(), 34.2f64);
+
+    assert_approx_eq!(3.8f32.floor(), 3.0f32);
+    assert_approx_eq!((-1.1f64).floor(), -2.0f64);
+
+    assert_approx_eq!((-2.3f32).ceil(), -2.0f32);
+    assert_approx_eq!(3.8f64.ceil(), 4.0f64);
+
+    assert_approx_eq!(0.1f32.trunc(), 0.0f32);
+    assert_approx_eq!((-0.1f64).trunc(), 0.0f64);
+}
diff --git a/tests/run-pass/intrinsics.rs b/tests/run-pass/intrinsics.rs
new file mode 100755
index 0000000000000..3152737a601ca
--- /dev/null
+++ b/tests/run-pass/intrinsics.rs
@@ -0,0 +1,10 @@
+use std::mem::{size_of, size_of_val};
+
+fn main() {
+    assert_eq!(size_of::<Option<i32>>(), 8);
+    assert_eq!(size_of_val(&()), 0);
+    assert_eq!(size_of_val(&42), 4);
+    assert_eq!(size_of_val(&[] as &[i32]), 0);
+    assert_eq!(size_of_val(&[1, 2, 3] as &[i32]), 12);
+    assert_eq!(size_of_val("foobar"), 6);
+}
diff --git a/tests/run-pass/ints.rs b/tests/run-pass/ints.rs
new file mode 100644
index 0000000000000..4f23b5ec9c381
--- /dev/null
+++ b/tests/run-pass/ints.rs
@@ -0,0 +1,58 @@
+fn ret() -> i64 {
+    1
+}
+
+fn neg() -> i64 {
+    -1
+}
+
+fn add() -> i64 {
+    1 + 2
+}
+
+fn indirect_add() -> i64 {
+    let x = 1;
+    let y = 2;
+    x + y
+}
+
+fn arith() -> i32 {
+    3*3 + 4*4
+}
+
+fn match_int() -> i16 {
+    let n = 2;
+    match n {
+        0 => 0,
+        1 => 10,
+        2 => 20,
+        3 => 30,
+        _ => 100,
+    }
+}
+
+fn match_int_range() -> i64 {
+    let n = 42;
+    match n {
+        0...9 => 0,
+        10...19 => 1,
+        20...29 => 2,
+        30...39 => 3,
+        40...49 => 4,
+        _ => 5,
+    }
+}
+
+fn main() {
+    assert_eq!(ret(), 1);
+    assert_eq!(neg(), -1);
+    assert_eq!(add(), 3);
+    assert_eq!(indirect_add(), 3);
+    assert_eq!(arith(), 5*5);
+    assert_eq!(match_int(), 20);
+    assert_eq!(match_int_range(), 4);
+    assert_eq!(i64::min_value().overflowing_mul(-1), (i64::min_value(), true));
+    assert_eq!(i32::min_value().overflowing_mul(-1), (i32::min_value(), true));
+    assert_eq!(i16::min_value().overflowing_mul(-1), (i16::min_value(), true));
+    assert_eq!(i8::min_value().overflowing_mul(-1), (i8::min_value(), true));
+}
diff --git a/tests/run-pass/issue-15063.rs b/tests/run-pass/issue-15063.rs
new file mode 100644
index 0000000000000..726aee283e292
--- /dev/null
+++ b/tests/run-pass/issue-15063.rs
@@ -0,0 +1,20 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+enum Two { A, B }
+impl Drop for Two {
+    fn drop(&mut self) {
+    }
+}
+fn main() {
+    let _k = Two::A;
+}
diff --git a/tests/run-pass/issue-15523-big.rs b/tests/run-pass/issue-15523-big.rs
new file mode 100644
index 0000000000000..33c81cab3817b
--- /dev/null
+++ b/tests/run-pass/issue-15523-big.rs
@@ -0,0 +1,48 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Issue 15523: derive(PartialOrd) should use the provided
+// discriminant values for the derived ordering.
+//
+// This test is checking corner cases that arise when you have
+// 64-bit values in the variants.
+
+#[derive(PartialEq, PartialOrd)]
+#[repr(u64)]
+enum Eu64 {
+    Pos2 = 2,
+    PosMax = !0,
+    Pos1 = 1,
+}
+
+#[derive(PartialEq, PartialOrd)]
+#[repr(i64)]
+enum Ei64 {
+    Pos2 = 2,
+    Neg1 = -1,
+    NegMin = 1 << 63,
+    PosMax = !(1 << 63),
+    Pos1 = 1,
+}
+
+fn main() {
+    assert!(Eu64::Pos2 > Eu64::Pos1);
+    assert!(Eu64::Pos2 < Eu64::PosMax);
+    assert!(Eu64::Pos1 < Eu64::PosMax);
+
+
+    assert!(Ei64::Pos2 > Ei64::Pos1);
+    assert!(Ei64::Pos2 > Ei64::Neg1);
+    assert!(Ei64::Pos1 > Ei64::Neg1);
+    assert!(Ei64::Pos2 > Ei64::NegMin);
+    assert!(Ei64::Pos1 > Ei64::NegMin);
+    assert!(Ei64::Pos2 < Ei64::PosMax);
+    assert!(Ei64::Pos1 < Ei64::PosMax);
+}
diff --git a/tests/run-pass/issue-17877.rs b/tests/run-pass/issue-17877.rs
new file mode 100644
index 0000000000000..6c87e8d35fbf0
--- /dev/null
+++ b/tests/run-pass/issue-17877.rs
@@ -0,0 +1,24 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+#![feature(slice_patterns)]
+
+fn main() {
+    assert_eq!(match [0u8; 1024] {
+        _ => 42_usize,
+    }, 42_usize);
+
+    assert_eq!(match [0u8; 1024] {
+        [1, _..] => 0_usize,
+        [0, _..] => 1_usize,
+        _ => 2_usize
+    }, 1_usize);
+}
diff --git a/tests/run-pass/issue-20575.rs b/tests/run-pass/issue-20575.rs
new file mode 100644
index 0000000000000..7db7e3b28e8e6
--- /dev/null
+++ b/tests/run-pass/issue-20575.rs
@@ -0,0 +1,19 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that overloaded calls work with zero arity closures
+
+// pretty-expanded FIXME #23616
+
+fn main() {
+    let functions: [Box<Fn() -> Option<()>>; 1] = [Box::new(|| None)];
+
+    let _: Option<Vec<()>> = functions.iter().map(|f| (*f)()).collect();
+}
diff --git a/tests/run-pass/issue-23261.rs b/tests/run-pass/issue-23261.rs
new file mode 100644
index 0000000000000..fc806f5429a47
--- /dev/null
+++ b/tests/run-pass/issue-23261.rs
@@ -0,0 +1,70 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Matching on a DST struct should not trigger an LLVM assertion.
+
+struct Foo<T: ?Sized> {
+    a: i32,
+    inner: T
+}
+
+trait Get {
+    fn get(&self) -> i32;
+}
+
+impl Get for i32 {
+    fn get(&self) -> i32 {
+        *self
+    }
+}
+
+fn check_val(val: &Foo<[u8]>) {
+    match *val {
+        Foo { a, .. } => {
+            assert_eq!(a, 32);
+        }
+    }
+}
+
+fn check_dst_val(val: &Foo<[u8]>) {
+    match *val {
+        Foo { ref inner, .. } => {
+            assert_eq!(inner, [1, 2, 3]);
+        }
+    }
+}
+
+fn check_both(val: &Foo<[u8]>) {
+    match *val {
+        Foo { a, ref inner } => {
+            assert_eq!(a, 32);
+            assert_eq!(inner, [1, 2, 3]);
+        }
+    }
+}
+
+fn check_trait_obj(val: &Foo<Get>) {
+    match *val {
+        Foo { a, ref inner } => {
+            assert_eq!(a, 32);
+            assert_eq!(inner.get(), 32);
+        }
+    }
+}
+
+fn main() {
+    let foo: &Foo<[u8]> = &Foo { a: 32, inner: [1, 2, 3] };
+    check_val(foo);
+    check_dst_val(foo);
+    check_both(foo);
+
+    let foo: &Foo<Get> = &Foo { a: 32, inner: 32 };
+    check_trait_obj(foo);
+}
diff --git a/tests/run-pass/issue-26709.rs b/tests/run-pass/issue-26709.rs
new file mode 100644
index 0000000000000..62626d75865cf
--- /dev/null
+++ b/tests/run-pass/issue-26709.rs
@@ -0,0 +1,26 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Wrapper<'a, T: ?Sized>(&'a mut i32, T);
+
+impl<'a, T: ?Sized> Drop for Wrapper<'a, T> {
+    fn drop(&mut self) {
+        *self.0 = 432;
+    }
+}
+
+fn main() {
+    let mut x = 0;
+    {
+        let wrapper = Box::new(Wrapper(&mut x, 123));
+        let _: Box<Wrapper<Send>> = wrapper;
+    }
+    assert_eq!(432, x)
+}
diff --git a/tests/run-pass/issue-30530.rs b/tests/run-pass/issue-30530.rs
new file mode 100644
index 0000000000000..d5139c908bdac
--- /dev/null
+++ b/tests/run-pass/issue-30530.rs
@@ -0,0 +1,35 @@
+// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for Issue #30530: alloca's created for storing
+// intermediate scratch values during brace-less match arms need to be
+// initialized with their drop-flag set to "dropped" (or else we end
+// up running the destructors on garbage data at the end of the
+// function).
+
+pub enum Handler {
+    Default,
+    #[allow(dead_code)]
+    Custom(*mut Box<Fn()>),
+}
+
+fn main() {
+    take(Handler::Default, Box::new(main));
+}
+
+#[inline(never)]
+pub fn take(h: Handler, f: Box<Fn()>) -> Box<Fn()> {
+    unsafe {
+        match h {
+            Handler::Custom(ptr) => *Box::from_raw(ptr),
+            Handler::Default => f,
+        }
+    }
+}
diff --git a/tests/run-pass/issue-31267-additional.rs b/tests/run-pass/issue-31267-additional.rs
new file mode 100644
index 0000000000000..14e38f43c527b
--- /dev/null
+++ b/tests/run-pass/issue-31267-additional.rs
@@ -0,0 +1,29 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(unused_variables)]
+
+#[derive(Clone, Copy, Debug)]
+struct Bar;
+
+const BAZ: Bar = Bar;
+
+#[derive(Debug)]
+struct Foo([Bar; 1]);
+
+struct Biz;
+
+impl Biz {
+    const BAZ: Foo = Foo([BAZ; 1]);
+}
+
+fn main() {
+    let foo = Biz::BAZ;
+}
diff --git a/tests/run-pass/issue-33387.rs b/tests/run-pass/issue-33387.rs
new file mode 100644
index 0000000000000..edbf2b81ce941
--- /dev/null
+++ b/tests/run-pass/issue-33387.rs
@@ -0,0 +1,19 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::sync::Arc;
+
+trait Foo {}
+
+impl Foo for [u8; 2] {}
+
+fn main() {
+    let _: Arc<Foo> = Arc::new([3, 4]);
+}
diff --git a/tests/run-pass/issue-35815.rs b/tests/run-pass/issue-35815.rs
new file mode 100644
index 0000000000000..216e06c0732c8
--- /dev/null
+++ b/tests/run-pass/issue-35815.rs
@@ -0,0 +1,25 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+use std::mem;
+
+struct Foo<T: ?Sized> {
+    a: i64,
+    b: bool,
+    c: T,
+}
+
+fn main() {
+    let foo: &Foo<i32> = &Foo { a: 1, b: false, c: 2i32 };
+    let foo_unsized: &Foo<Send> = foo;
+    assert_eq!(mem::size_of_val(foo), mem::size_of_val(foo_unsized));
+}
diff --git a/tests/run-pass/issue-36278-prefix-nesting.rs b/tests/run-pass/issue-36278-prefix-nesting.rs
new file mode 100644
index 0000000000000..95269d0569dec
--- /dev/null
+++ b/tests/run-pass/issue-36278-prefix-nesting.rs
@@ -0,0 +1,28 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Issue 36278: On an unsized struct with >1 level of nontrivial
+// nesting, ensure we are computing dynamic size of prefix correctly.
+
+use std::mem;
+
+const SZ: usize = 100;
+struct P<T: ?Sized>([u8; SZ], T);
+
+type Ack<T> = P<P<T>>;
+
+fn main() {
+    let size_of_sized; let size_of_unsized;
+    let x: Box<Ack<[u8; 0]>> = Box::new(P([0; SZ], P([0; SZ], [0; 0])));
+    size_of_sized = mem::size_of_val::<Ack<_>>(&x);
+    let y: Box<Ack<[u8]>> = x;
+    size_of_unsized = mem::size_of_val::<Ack<_>>(&y);
+    assert_eq!(size_of_sized, size_of_unsized);
+}
diff --git a/tests/run-pass/issue-3794.rs b/tests/run-pass/issue-3794.rs
new file mode 100644
index 0000000000000..badb833ee800b
--- /dev/null
+++ b/tests/run-pass/issue-3794.rs
@@ -0,0 +1,41 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(box_syntax)]
+
+trait T {
+    fn print(&self);
+}
+
+#[derive(Debug)]
+struct S {
+    s: isize,
+}
+
+impl T for S {
+    fn print(&self) {
+        println!("{:?}", self);
+    }
+}
+
+fn print_t(t: &T) {
+    t.print();
+}
+
+fn print_s(s: &S) {
+    s.print();
+}
+
+pub fn main() {
+    let s: Box<S> = box S { s: 5 };
+    print_s(&*s);
+    let t: Box<T> = s as Box<T>;
+    print_t(&*t);
+}
diff --git a/tests/run-pass/issue-5917.rs b/tests/run-pass/issue-5917.rs
new file mode 100644
index 0000000000000..69b95f2cd7e10
--- /dev/null
+++ b/tests/run-pass/issue-5917.rs
@@ -0,0 +1,17 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+struct T (&'static [isize]);
+static STATIC : T = T (&[5, 4, 3]);
+pub fn main () {
+    let T(ref v) = STATIC;
+    assert_eq!(v[0], 5);
+}
diff --git a/tests/run-pass/issue-miri-184.rs b/tests/run-pass/issue-miri-184.rs
new file mode 100644
index 0000000000000..24775fe8a2d9d
--- /dev/null
+++ b/tests/run-pass/issue-miri-184.rs
@@ -0,0 +1,4 @@
+pub fn main() {
+    let bytes: [u8; 8] = unsafe { ::std::mem::transmute(0u64) };
+    let _: &[u8] = &bytes;
+}
diff --git a/tests/run-pass/iter_slice.rs b/tests/run-pass/iter_slice.rs
new file mode 100644
index 0000000000000..fd7229c3455e4
--- /dev/null
+++ b/tests/run-pass/iter_slice.rs
@@ -0,0 +1,12 @@
+fn main() {
+    for _ in Vec::<i32>::new().iter() { // this iterates over a Unique::empty()
+        panic!("We should never be here.");
+    }
+
+    // Iterate over a ZST (uses arith_offset internally)
+    let mut count = 0;
+    for _ in &[(), (), ()] {
+        count += 1;
+    }
+    assert_eq!(count, 3);
+}
diff --git a/tests/run-pass/last-use-in-cap-clause.rs b/tests/run-pass/last-use-in-cap-clause.rs
new file mode 100644
index 0000000000000..de2d815ca54eb
--- /dev/null
+++ b/tests/run-pass/last-use-in-cap-clause.rs
@@ -0,0 +1,25 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Make sure #1399 stays fixed
+
+#[allow(dead_code)]
+struct A { a: Box<isize> }
+
+fn foo() -> Box<FnMut() -> isize + 'static> {
+    let k: Box<_> = Box::new(22);
+    let _u = A {a: k.clone()};
+    let result = || 22;
+    Box::new(result)
+}
+
+pub fn main() {
+    assert_eq!(foo()(), 22);
+}
diff --git a/tests/run-pass/loops.rs b/tests/run-pass/loops.rs
new file mode 100644
index 0000000000000..222287cbe09ad
--- /dev/null
+++ b/tests/run-pass/loops.rs
@@ -0,0 +1,35 @@
+fn factorial_loop() -> i64 {
+    let mut product = 1;
+    let mut i = 1;
+
+    while i <= 10 {
+        product *= i;
+        i += 1;
+    }
+
+    product
+}
+
+fn index_for_loop() -> usize {
+    let mut sum = 0;
+    let a = [0, 10, 20, 30];
+    for i in 0..a.len() {
+        sum += a[i];
+    }
+    sum
+}
+
+fn for_loop() -> usize {
+    let mut sum = 0;
+    let a = [0, 10, 20, 30];
+    for &n in &a {
+        sum += n;
+    }
+    sum
+}
+
+fn main() {
+    assert_eq!(factorial_loop(), 3628800);
+    assert_eq!(index_for_loop(), 60);
+    assert_eq!(for_loop(), 60);
+}
diff --git a/tests/run-pass/main_fn.rs b/tests/run-pass/main_fn.rs
new file mode 100644
index 0000000000000..91d183ee6af70
--- /dev/null
+++ b/tests/run-pass/main_fn.rs
@@ -0,0 +1,5 @@
+#![feature(main)]
+
+#[main]
+fn foo() {
+}
diff --git a/tests/run-pass/match_slice.rs b/tests/run-pass/match_slice.rs
new file mode 100644
index 0000000000000..568a1a1c88182
--- /dev/null
+++ b/tests/run-pass/match_slice.rs
@@ -0,0 +1,8 @@
+fn main() {
+    let x = "hello";
+    match x {
+        "foo" => {},
+        "bar" => {},
+        _ => {},
+    }
+}
diff --git a/tests/run-pass/mir_coercions.rs b/tests/run-pass/mir_coercions.rs
new file mode 100644
index 0000000000000..36155297e32f0
--- /dev/null
+++ b/tests/run-pass/mir_coercions.rs
@@ -0,0 +1,80 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(coerce_unsized, unsize)]
+
+use std::ops::CoerceUnsized;
+use std::marker::Unsize;
+
+fn identity_coercion(x: &(Fn(u32)->u32 + Send)) -> &Fn(u32)->u32 {
+    x
+}
+fn fn_coercions(f: &fn(u32) -> u32) ->
+    (unsafe fn(u32) -> u32,
+     &(Fn(u32) -> u32+Send))
+{
+    (*f, f)
+}
+
+fn simple_array_coercion(x: &[u8; 3]) -> &[u8] { x }
+
+fn square(a: u32) -> u32 { a * a }
+
+#[derive(PartialEq,Eq)]
+struct PtrWrapper<'a, T: 'a+?Sized>(u32, u32, (), &'a T);
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized>
+    CoerceUnsized<PtrWrapper<'a, U>> for PtrWrapper<'a, T> {}
+
+struct TrivPtrWrapper<'a, T: 'a+?Sized>(&'a T);
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized>
+    CoerceUnsized<TrivPtrWrapper<'a, U>> for TrivPtrWrapper<'a, T> {}
+
+fn coerce_ptr_wrapper(p: PtrWrapper<[u8; 3]>) -> PtrWrapper<[u8]> {
+    p
+}
+
+fn coerce_triv_ptr_wrapper(p: TrivPtrWrapper<[u8; 3]>) -> TrivPtrWrapper<[u8]> {
+    p
+}
+
+fn coerce_fat_ptr_wrapper(p: PtrWrapper<Fn(u32) -> u32+Send>)
+                          -> PtrWrapper<Fn(u32) -> u32> {
+    p
+}
+
+fn coerce_ptr_wrapper_poly<'a, T, Trait: ?Sized>(p: PtrWrapper<'a, T>)
+                                                 -> PtrWrapper<'a, Trait>
+    where PtrWrapper<'a, T>: CoerceUnsized<PtrWrapper<'a, Trait>>
+{
+    p
+}
+
+fn main() {
+    let a = [0,1,2];
+    let square_local : fn(u32) -> u32 = square;
+    let (f,g) = fn_coercions(&square_local);
+    assert_eq!(f as *const (), square as *const());
+    assert_eq!(g(4), 16);
+    assert_eq!(identity_coercion(g)(5), 25);
+
+    assert_eq!(simple_array_coercion(&a), &a);
+    let w = coerce_ptr_wrapper(PtrWrapper(2,3,(),&a));
+    assert!(w == PtrWrapper(2,3,(),&a) as PtrWrapper<[u8]>);
+
+    let w = coerce_triv_ptr_wrapper(TrivPtrWrapper(&a));
+    assert_eq!(&w.0, &a);
+
+    let z = coerce_fat_ptr_wrapper(PtrWrapper(2,3,(),&square_local));
+    assert_eq!((z.3)(6), 36);
+
+    let z: PtrWrapper<Fn(u32) -> u32> =
+        coerce_ptr_wrapper_poly(PtrWrapper(2,3,(),&square_local));
+    assert_eq!((z.3)(6), 36);
+}
diff --git a/tests/run-pass/mir_fat_ptr.rs b/tests/run-pass/mir_fat_ptr.rs
new file mode 100644
index 0000000000000..e5c9e3577d1c3
--- /dev/null
+++ b/tests/run-pass/mir_fat_ptr.rs
@@ -0,0 +1,64 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// test that ordinary fat pointer operations work.
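+//
+// A fat pointer is passed through a field projection, a local, a struct and
+// a store below; every step must preserve both its data pointer and length.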
+
+struct Wrapper<T: ?Sized>(u32, T);
+
+struct FatPtrContainer<'a> {
+    ptr: &'a [u8]
+}
+
+fn fat_ptr_project(a: &Wrapper<[u8]>) -> &[u8] {
+    &a.1
+}
+
+fn fat_ptr_simple(a: &[u8]) -> &[u8] {
+    a
+}
+
+fn fat_ptr_via_local(a: &[u8]) -> &[u8] {
+    let x = a;
+    x
+}
+
+fn fat_ptr_from_struct(s: FatPtrContainer) -> &[u8] {
+    s.ptr
+}
+
+fn fat_ptr_to_struct(a: &[u8]) -> FatPtrContainer {
+    FatPtrContainer { ptr: a }
+}
+
+fn fat_ptr_store_to<'a>(a: &'a [u8], b: &mut &'a [u8]) {
+    *b = a;
+}
+
+fn fat_ptr_constant() -> &'static str {
+    "HELLO"
+}
+
+fn main() {
+    let a = Wrapper(4, [7,6,5]);
+
+    let p = fat_ptr_project(&a);
+    let p = fat_ptr_simple(p);
+    let p = fat_ptr_via_local(p);
+    let p = fat_ptr_from_struct(fat_ptr_to_struct(p));
+
+    let mut target : &[u8] = &[42];
+    fat_ptr_store_to(p, &mut target);
+    assert_eq!(target, &a.1);
+
+    assert_eq!(fat_ptr_constant(), "HELLO");
+}
diff --git a/tests/run-pass/miri-issue-133.rs b/tests/run-pass/miri-issue-133.rs
new file mode 100644
index 0000000000000..406b5e102c8b4
--- /dev/null
+++ b/tests/run-pass/miri-issue-133.rs
@@ -0,0 +1,30 @@
+use std::mem::size_of;
+
+struct S<U, V> {
+    _u: U,
+    size_of_u: usize,
+    _v: V,
+    size_of_v: usize
+}
+
+impl<U, V> S<U, V> {
+    fn new(u: U, v: V) -> Self {
+        S {
+            _u: u,
+            size_of_u: size_of::<U>(),
+            _v: v,
+            size_of_v: size_of::<V>()
+        }
+    }
+}
+
+impl<U, V> Drop for S<U, V> {
+    fn drop(&mut self) {
+        assert_eq!(size_of::<U>(), self.size_of_u);
+        assert_eq!(size_of::<V>(), self.size_of_v);
+    }
+}
+
+fn main() {
+    S::new(0u8, 1u16);
+}
diff --git a/tests/run-pass/move-arg-3-unique.rs b/tests/run-pass/move-arg-3-unique.rs
new file mode 100644
index 0000000000000..2e6320eb80257
--- /dev/null
+++ b/tests/run-pass/move-arg-3-unique.rs
@@ -0,0 +1,18 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(unused_features, unused_variables)]
+#![feature(box_syntax)]
+
+pub fn main() {
+    let x = box 10;
+    let y = x;
+    assert_eq!(*y, 10);
+}
diff --git a/tests/run-pass/move-undef-primval.rs b/tests/run-pass/move-undef-primval.rs
new file mode 100644
index 0000000000000..73c33943a63ac
--- /dev/null
+++ b/tests/run-pass/move-undef-primval.rs
@@ -0,0 +1,12 @@
+struct Foo {
+    _inner: i32,
+}
+
+fn main() {
+    unsafe {
+        let foo = Foo {
+            _inner: std::mem::uninitialized(),
+        };
+        let _bar = foo;
+    }
+}
diff --git a/tests/run-pass/multi_arg_closure.rs b/tests/run-pass/multi_arg_closure.rs
new file mode 100644
index 0000000000000..30cfb5b685b20
--- /dev/null
+++ b/tests/run-pass/multi_arg_closure.rs
@@ -0,0 +1,8 @@
+fn foo(f: &mut FnMut(isize, isize) -> isize) -> isize {
+    f(1, 2)
+}
+
+fn main() {
+    let z = foo(&mut |x, y| x * 10 + y);
+    assert_eq!(z, 12);
+}
diff --git a/tests/run-pass/negative_discriminant.rs b/tests/run-pass/negative_discriminant.rs
new file mode 100644
index 0000000000000..16f175e7dfc80
--- /dev/null
+++ b/tests/run-pass/negative_discriminant.rs
@@ -0,0 +1,13 @@
+enum AB { A = -1, B = 1 }
+
+fn main() {
+    match AB::A {
+        AB::A => (),
+        AB::B => panic!(),
+    }
+
+    match AB::B {
+        AB::A => panic!(),
+        AB::B => (),
+    }
+}
diff --git a/tests/run-pass/non_capture_closure_to_fn_ptr.rs b/tests/run-pass/non_capture_closure_to_fn_ptr.rs
new file mode 100644
index 0000000000000..c9daff9c9f469
--- /dev/null
+++ b/tests/run-pass/non_capture_closure_to_fn_ptr.rs
@@ -0,0 +1,14 @@
+// allow(const_err) to work around a bug in warnings
+#[allow(const_err)]
+static FOO: fn() = || { assert_ne!(42, 43) };
+#[allow(const_err)]
+static BAR: fn(i32, i32) = |a, b| { assert_ne!(a, b) };
+
+fn main() {
+    FOO();
+    BAR(44, 45);
+    let bar: unsafe fn(i32, i32) = BAR;
+    unsafe { bar(46, 47) };
+    let boo: &Fn(i32, i32) = &BAR;
+    boo(48, 49);
+}
diff --git a/tests/run-pass/observed_local_mut.rs b/tests/run-pass/observed_local_mut.rs
new file mode 100644
index 0000000000000..a4ecf1e635d24
--- /dev/null
+++ b/tests/run-pass/observed_local_mut.rs
@@ -0,0 +1,21 @@
+// This test is intended to guard against the problem described in commit
+// 39bb1254d1eaf74f45a4e741097e33fc942168d5.
+//
+// As written, it might be considered UB in compiled Rust, but of course Miri gives it a safe,
+// deterministic behaviour (one that might not correspond with how an eventual Rust spec would
+// define this).
+//
+// An alternative way to write the test without `unsafe` would be to use `Cell`, but it would
+// only surface the bug described by the above commit if `Cell` on the stack got represented
+// as a primitive `PrimVal::I32` which is not yet the case.
+
+fn main() {
+    let mut x = 0;
+    let y: *const i32 = &x;
+    x = 1;
+
+    // When the described bug is in place, this results in `0`, not observing the `x = 1` line.
+    assert_eq!(unsafe { *y }, 1);
+
+    assert_eq!(x, 1);
+}
diff --git a/tests/run-pass/option_box_transmute_ptr.rs b/tests/run-pass/option_box_transmute_ptr.rs
new file mode 100644
index 0000000000000..0786db1ef895a
--- /dev/null
+++ b/tests/run-pass/option_box_transmute_ptr.rs
@@ -0,0 +1,17 @@
+// This tests that the size of Option<Box<i32>> is the same as *const i32.
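+// The sizes match because of the non-null (niche) optimization: a Box is
+// never null, so `None` can be represented by the null pointer.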
+fn option_box_deref() -> i32 {
+    let val = Some(Box::new(42));
+    unsafe {
+        let ptr: *const i32 = std::mem::transmute::<Option<Box<i32>>, *const i32>(val);
+        let ret = *ptr;
+        // unleak memory
+        std::mem::transmute::<*const i32, Option<Box<i32>>>(ptr);
+        ret
+    }
+}
+
+fn main() {
+    assert_eq!(option_box_deref(), 42);
+}
diff --git a/tests/run-pass/option_eq.rs b/tests/run-pass/option_eq.rs
new file mode 100644
index 0000000000000..e698f8767746c
--- /dev/null
+++ b/tests/run-pass/option_eq.rs
@@ -0,0 +1,3 @@
+fn main() {
+    assert_eq!(std::char::from_u32('x' as u32), Some('x'));
+}
diff --git a/tests/run-pass/overloaded-calls-simple.rs b/tests/run-pass/overloaded-calls-simple.rs
new file mode 100644
index 0000000000000..1eeda12ca06f8
--- /dev/null
+++ b/tests/run-pass/overloaded-calls-simple.rs
@@ -0,0 +1,33 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+#![feature(lang_items, unboxed_closures, fn_traits)]
+
+struct S3 {
+    x: i32,
+    y: i32,
+}
+
+impl FnOnce<(i32,i32)> for S3 {
+    type Output = i32;
+    extern "rust-call" fn call_once(self, (z,zz): (i32,i32)) -> i32 {
+        self.x * self.y * z * zz
+    }
+}
+
+fn main() {
+    let s = S3 {
+        x: 3,
+        y: 3,
+    };
+    let ans = s(3, 1);
+    assert_eq!(ans, 27);
+}
diff --git a/tests/run-pass/packed_struct.rs b/tests/run-pass/packed_struct.rs
new file mode 100644
index 0000000000000..7219649e728c9
--- /dev/null
+++ b/tests/run-pass/packed_struct.rs
@@ -0,0 +1,65 @@
+#![allow(dead_code)]
+#![feature(unsize, coerce_unsized)]
+
+#[repr(packed)]
+struct S {
+    a: i32,
+    b: i64,
+}
+
+#[repr(packed)]
+struct Test1<'a> {
+    x: u8,
+    other: &'a u32,
+}
+
+#[repr(packed)]
+struct Test2<'a> {
+    x: u8,
+    other: &'a Test1<'a>,
+}
+
+fn test(t: Test2) {
+    let x = *t.other.other;
+    assert_eq!(x, 42);
+}
+
+fn test_unsizing() {
+    #[repr(packed)]
+    struct UnalignedPtr<'a, T: ?Sized>
+        where T: 'a,
+    {
+        data: &'a T,
+    }
+
+    impl<'a, T, U> std::ops::CoerceUnsized<UnalignedPtr<'a, U>> for UnalignedPtr<'a, T>
+        where
+        T: std::marker::Unsize<U> + ?Sized,
+        U: ?Sized,
+    { }
+
+    let arr = [1, 2, 3];
+    let arr_unaligned: UnalignedPtr<[i32; 3]> = UnalignedPtr { data: &arr };
+    let _uns: UnalignedPtr<[i32]> = arr_unaligned;
+}
+
+fn main() {
+    let mut x = S {
+        a: 42,
+        b: 99,
+    };
+    let a = x.a;
+    let b = x.b;
+    assert_eq!(a, 42);
+    assert_eq!(b, 99);
+    // can't do `assert_eq!(x.a, 42)`, because `assert_eq!` takes a reference
+    assert_eq!({x.a}, 42);
+    assert_eq!({x.b}, 99);
+
+    x.b = 77;
+    assert_eq!({x.b}, 77);
+
+    test(Test2 { x: 0, other: &Test1 { x: 0, other: &42 }});
+
+    test_unsizing();
+}
diff --git a/tests/run-pass/pointers.rs b/tests/run-pass/pointers.rs
new file mode 100644
index 0000000000000..2ef7eb0102f19
--- /dev/null
+++ b/tests/run-pass/pointers.rs
@@ -0,0 +1,60 @@
+fn one_line_ref() -> i16 {
+    *&1
+}
+
+fn basic_ref() -> i16 {
+    let x = &1;
+    *x
+}
+
+fn basic_ref_mut() -> i16 {
+    let x = &mut 1;
+    *x += 2;
+    *x
+}
+
+fn basic_ref_mut_var() -> i16 {
+    let mut a = 1;
+    {
+        let x = &mut a;
+        *x += 2;
+    }
+    a
+}
+
+fn tuple_ref_mut() -> (i8, i8) {
+    let mut t = (10, 20);
+    {
+        let x = &mut t.1;
+        *x += 2;
+    }
+    t
+}
+
+fn match_ref_mut() -> i8 {
+    let mut t = (20, 22);
+    {
+        let mut opt = Some(&mut t);
+        match opt {
+            Some(&mut (ref mut x, ref mut y)) => *x += *y,
+            None => {},
+        }
+    }
+    t.0
+}
+
+fn dangling_pointer() -> *const i32 {
+    let b = Box::new(42);
+    &*b as *const i32
+}
+
+fn main() {
+    assert_eq!(one_line_ref(), 1);
+    assert_eq!(basic_ref(), 1);
+    assert_eq!(basic_ref_mut(), 3);
+    assert_eq!(basic_ref_mut_var(), 3);
+    assert_eq!(tuple_ref_mut(), (10, 22));
+    assert_eq!(match_ref_mut(), 42);
+    // FIXME: improve this test... how?
+    assert!(dangling_pointer() != std::ptr::null());
+}
diff --git a/tests/run-pass/products.rs b/tests/run-pass/products.rs
new file mode 100644
index 0000000000000..86bb71a0be560
--- /dev/null
+++ b/tests/run-pass/products.rs
@@ -0,0 +1,32 @@
+fn tuple() -> (i16,) {
+    (1,)
+}
+
+fn tuple_2() -> (i16, i16) {
+    (1, 2)
+}
+
+fn tuple_5() -> (i16, i16, i16, i16, i16) {
+    (1, 2, 3, 4, 5)
+}
+
+#[derive(Debug, PartialEq)]
+struct Pair { x: i8, y: i8 }
+
+fn pair() -> Pair {
+    Pair { x: 10, y: 20 }
+}
+
+fn field_access() -> (i8, i8) {
+    let mut p = Pair { x: 10, y: 20 };
+    p.x += 5;
+    (p.x, p.y)
+}
+
+fn main() {
+    assert_eq!(tuple(), (1,));
+    assert_eq!(tuple_2(), (1, 2));
+    assert_eq!(tuple_5(), (1, 2, 3, 4, 5));
+    assert_eq!(pair(), Pair { x: 10, y: 20} );
+    assert_eq!(field_access(), (15, 20));
+}
diff --git a/tests/run-pass/ptr_arith_offset.rs b/tests/run-pass/ptr_arith_offset.rs
new file mode 100644
index 0000000000000..7912da9fd437c
--- /dev/null
+++ b/tests/run-pass/ptr_arith_offset.rs
@@ -0,0 +1,6 @@
+fn main() {
+    let v = [1i16, 2];
+    let x = &v as *const i16;
+    let x = x.wrapping_offset(1);
+    assert_eq!(unsafe { *x }, 2);
+}
diff --git a/tests/run-pass/ptr_arith_offset_overflow.rs b/tests/run-pass/ptr_arith_offset_overflow.rs
new file mode 100644
index 0000000000000..3383c3b801482
--- /dev/null
+++ b/tests/run-pass/ptr_arith_offset_overflow.rs
@@ -0,0 +1,9 @@
+fn main() {
+    let v = [1i16, 2];
+    let x = &v[1] as *const i16;
+    // Adding 2*isize::max and then 1 is like subtracting 1
+    let x = x.wrapping_offset(isize::max_value());
+    let x = x.wrapping_offset(isize::max_value());
+    let x = x.wrapping_offset(1);
+    assert_eq!(unsafe { *x }, 1);
+}
diff --git a/tests/run-pass/ptr_int_casts.rs b/tests/run-pass/ptr_int_casts.rs
new file mode 100644
index 0000000000000..88fb16e069ec9
--- /dev/null
+++ b/tests/run-pass/ptr_int_casts.rs
@@ -0,0 +1,32 @@
+use std::mem;
+
+fn eq_ref<T>(x: &T, y: &T) -> bool {
+    x as *const _ == y as *const _
+}
+
+fn f() -> i32 { 42 }
+
+fn main() {
+    // int-ptr-int
+    assert_eq!(1 as *const i32 as usize, 1);
+    assert_eq!((1 as *const i32).wrapping_offset(4) as usize, 1 + 4*4);
+
+    { // ptr-int-ptr
+        let x = 13;
+        let mut y = &x as &_ as *const _ as usize;
+        y += 13;
+        y -= 13;
+        let y = y as *const _;
+        assert!(eq_ref(&x, unsafe { &*y }));
+    }
+
+    { // fnptr-int-fnptr
+        let x : fn() -> i32 = f;
+        let y : *mut u8 = unsafe { mem::transmute(x as fn() -> i32) };
+        let mut y = y as usize;
+        y += 13;
+        y -= 13;
+        let x : fn() -> i32 = unsafe { mem::transmute(y as *mut u8) };
+        assert_eq!(x(), 42);
+    }
+}
diff --git a/tests/run-pass/ptr_offset.rs b/tests/run-pass/ptr_offset.rs
new file mode 100644
index 0000000000000..6add5212db9f6
--- /dev/null
+++ b/tests/run-pass/ptr_offset.rs
@@ -0,0 +1,6 @@
+fn main() {
+    let v = [1i16, 2];
+    let x = &v as *const i16;
+    let x = unsafe { x.offset(1) };
+    assert_eq!(unsafe { *x }, 2);
+}
diff --git a/tests/run-pass/rc.rs b/tests/run-pass/rc.rs
new file mode 100644
index 0000000000000..c6de3675abe8c
--- /dev/null
+++ b/tests/run-pass/rc.rs
@@ -0,0 +1,23 @@
+use std::cell::RefCell;
+use std::rc::Rc;
+
+fn rc_refcell() -> i32 {
+    let r = Rc::new(RefCell::new(42));
+    *r.borrow_mut() += 10;
+    let x = *r.borrow();
+    x
+}
+
+fn rc_raw() {
+    let r = Rc::new(0);
+    let r2 = Rc::into_raw(r.clone());
+    let r2 = unsafe { Rc::from_raw(r2) };
+    assert!(Rc::ptr_eq(&r, &r2));
+    drop(r);
+    assert!(Rc::try_unwrap(r2).is_ok());
+}
+
+fn main() {
+    rc_refcell();
+    rc_raw();
+}
diff --git a/tests/run-pass/recursive_static.rs b/tests/run-pass/recursive_static.rs
new file mode 100644
index 0000000000000..77f2902917a1c
--- /dev/null
+++ b/tests/run-pass/recursive_static.rs
@@ -0,0 +1,9 @@
+struct S(&'static S);
+static S1: S = S(&S2);
+static S2: S = S(&S1);
+
+fn main() {
+    let p: *const S = S2.0;
+    let q: *const S = &S1;
+    assert_eq!(p, q);
+}
diff --git a/tests/run-pass/regions-lifetime-nonfree-late-bound.rs b/tests/run-pass/regions-lifetime-nonfree-late-bound.rs
new file mode 100644
index 0000000000000..1aef95d8a3f30
--- /dev/null
+++ b/tests/run-pass/regions-lifetime-nonfree-late-bound.rs
@@ -0,0 +1,45 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This is a regression test for the ICE from issue #10846.
+//
+// The original issue causing the ICE: the LUB-computations during
+// type inference were encountering late-bound lifetimes, and
+// asserting that such lifetimes should have already been substituted
+// with a concrete lifetime.
+//
+// However, those encounters were occurring within the lexical scope
+// of the binding for the late-bound lifetime; that is, the late-bound
+// lifetimes were perfectly valid. The core problem was that the type
+// folding code was over-zealously passing back all lifetimes when
+// doing region-folding, when really all clients of the region-folding
+// case only want to see FREE lifetime variables, not bound ones.
+
+// pretty-expanded FIXME #23616
+
+#![allow(unused_features)]
+#![feature(box_syntax)]
+
+pub fn main() {
+    fn explicit() {
+        fn test<F>(_x: Option<Box<F>>) where F: FnMut(Box<for<'a> FnMut(&'a isize)>) {}
+        test(Some(box |_f: Box<for<'a> FnMut(&'a isize)>| {}));
+    }
+
+    // The code below is shorthand for the code above (and more likely
+    // to represent what one encounters in practice).
+    fn implicit() {
+        fn test<F>(_x: Option<Box<F>>) where F: FnMut(Box< FnMut(& isize)>) {}
+        test(Some(box |_f: Box< FnMut(& isize)>| {}));
+    }
+
+    explicit();
+    implicit();
+}
diff --git a/tests/run-pass/rfc1623.rs b/tests/run-pass/rfc1623.rs
new file mode 100644
index 0000000000000..0ee523a5be00a
--- /dev/null
+++ b/tests/run-pass/rfc1623.rs
@@ -0,0 +1,81 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+// very simple test for a 'static static with default lifetime
+static STATIC_STR: &str = "&'static str";
+const CONST_STR: &str = "&'static str";
+
+// this should be the same as without default:
+static EXPLICIT_STATIC_STR: &'static str = "&'static str";
+const EXPLICIT_CONST_STR: &'static str = "&'static str";
+
+// a function that elides to an unbound lifetime for both in- and output
+fn id_u8_slice(arg: &[u8]) -> &[u8] {
+    arg
+}
+
+// one with a function, argument elided
+static STATIC_SIMPLE_FN: &fn(&[u8]) -> &[u8] = &(id_u8_slice as fn(&[u8]) -> &[u8]);
+const CONST_SIMPLE_FN: &fn(&[u8]) -> &[u8] = &(id_u8_slice as fn(&[u8]) -> &[u8]);
+
+// this should be the same as without elision
+static STATIC_NON_ELIDED_FN: &for<'a> fn(&'a [u8]) -> &'a [u8] =
+    &(id_u8_slice as for<'a> fn(&'a [u8]) -> &'a [u8]);
+const CONST_NON_ELIDED_FN: &for<'a> fn(&'a [u8]) -> &'a [u8] =
+    &(id_u8_slice as for<'a> fn(&'a [u8]) -> &'a [u8]);
+
+// another function that elides, each to a different unbound lifetime
+fn multi_args(_a: &u8, _b: &u8, _c: &u8) {}
+
+static STATIC_MULTI_FN: &fn(&u8, &u8, &u8) = &(multi_args as fn(&u8, &u8, &u8));
+const CONST_MULTI_FN: &fn(&u8, &u8, &u8) = &(multi_args as fn(&u8, &u8, &u8));
+
+struct Foo<'a> {
+    bools: &'a [bool],
+}
+
+static STATIC_FOO: Foo = Foo { bools: &[true, false] };
+const CONST_FOO: Foo = Foo { bools: &[true, false] };
+
+type Bar<'a> = Foo<'a>;
+
+static STATIC_BAR: Bar = Bar { bools: &[true, false] };
+const CONST_BAR: Bar = Bar { bools: &[true, false] };
+
+type Baz<'a> = fn(&'a [u8]) -> Option<u8>;
+
+fn baz(e: &[u8]) -> Option<u8> {
+    e.first().map(|x| *x)
+}
+
+static STATIC_BAZ: &Baz = &(baz as Baz);
+const CONST_BAZ: &Baz = &(baz as Baz);
+
+static BYTES: &[u8] = &[1, 2, 3];
+
+fn main() {
+    // make sure that the lifetime is actually elided (and not defaulted)
+    let x = &[1u8, 2, 3];
+    STATIC_SIMPLE_FN(x);
+    CONST_SIMPLE_FN(x);
+
+    STATIC_BAZ(BYTES); // needs static lifetime
+    CONST_BAZ(BYTES);
+
+    // make sure this works with different lifetimes
+    let a = &1;
+    {
+        let b = &2;
+        let c = &3;
+        CONST_MULTI_FN(a, b, c);
+    }
+}
diff --git a/tests/run-pass/rust-lang-org.rs b/tests/run-pass/rust-lang-org.rs
new file mode 100644
index 0000000000000..7ba68e6b239c0
--- /dev/null
+++ b/tests/run-pass/rust-lang-org.rs
@@ -0,0 +1,21 @@
+// This code is editable and runnable!
+fn main() {
+    // A simple integer calculator:
+    // `+` or `-` means add or subtract by 1
+    // `*` or `/` means multiply or divide by 2
+
+    let program = "+ + * - /";
+    let mut accumulator = 0;
+
+    for token in program.chars() {
+        match token {
+            '+' => accumulator += 1,
+            '-' => accumulator -= 1,
+            '*' => accumulator *= 2,
+            '/' => accumulator /= 2,
+            _ => { /* ignore everything else */ }
+        }
+    }
+
+    assert_eq!(accumulator, 1);
+}
diff --git a/tests/run-pass/send-is-not-static-par-for.rs b/tests/run-pass/send-is-not-static-par-for.rs
new file mode 100644
index 0000000000000..4ac1b5436f522
--- /dev/null
+++ b/tests/run-pass/send-is-not-static-par-for.rs
@@ -0,0 +1,43 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
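+
+// `Send` data need not be `'static`: here, references into the caller's stack
+// frame are handed to a parallel-for style helper.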
+
+//ignore-windows
+
+use std::sync::Mutex;
+
+fn par_for<I, F>(iter: I, f: F)
+    where I: Iterator,
+          I::Item: Send,
+          F: Fn(I::Item) + Sync
+{
+    for item in iter {
+        f(item)
+    }
+}
+
+fn sum(x: &[i32]) {
+    let sum_lengths = Mutex::new(0);
+    par_for(x.windows(4), |x| {
+        *sum_lengths.lock().unwrap() += x.len()
+    });
+
+    assert_eq!(*sum_lengths.lock().unwrap(), (x.len() - 3) * 4);
+}
+
+fn main() {
+    let mut elements = [0; 20];
+
+    // iterators over references into this stack frame
+    par_for(elements.iter_mut().enumerate(), |(i, x)| {
+        *x = i as i32
+    });
+
+    sum(&elements)
+}
diff --git a/tests/run-pass/sendable-class.rs b/tests/run-pass/sendable-class.rs
new file mode 100644
index 0000000000000..b3e07d00f010f
--- /dev/null
+++ b/tests/run-pass/sendable-class.rs
@@ -0,0 +1,34 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that a class with only sendable fields can be sent
+
+// pretty-expanded FIXME #23616
+
+use std::sync::mpsc::channel;
+
+#[allow(dead_code)]
+struct Foo {
+    i: isize,
+    j: char,
+}
+
+fn foo(i: isize, j: char) -> Foo {
+    Foo {
+        i: i,
+        j: j
+    }
+}
+
+pub fn main() {
+    let (tx, rx) = channel();
+    let _ = tx.send(foo(42, 'c'));
+    let _ = rx;
+}
diff --git a/tests/run-pass/simd-intrinsic-generic-elements.rs b/tests/run-pass/simd-intrinsic-generic-elements.rs
new file mode 100644
index 0000000000000..36567f4c03310
--- /dev/null
+++ b/tests/run-pass/simd-intrinsic-generic-elements.rs
@@ -0,0 +1,42 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(repr_simd, platform_intrinsics)]
+
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x2(i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x3(i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x4(i32, i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x8(i32, i32, i32, i32,
+             i32, i32, i32, i32);
+
+fn main() {
+    let _x2 = i32x2(20, 21);
+    let _x3 = i32x3(30, 31, 32);
+    let _x4 = i32x4(40, 41, 42, 43);
+    let _x8 = i32x8(80, 81, 82, 83, 84, 85, 86, 87);
+
+    let _y2 = i32x2(120, 121);
+    let _y3 = i32x3(130, 131, 132);
+    let _y4 = i32x4(140, 141, 142, 143);
+    let _y8 = i32x8(180, 181, 182, 183, 184, 185, 186, 187);
+
+}
diff --git a/tests/run-pass/slice-of-zero-size-elements.rs b/tests/run-pass/slice-of-zero-size-elements.rs
new file mode 100644
index 0000000000000..dbe8ec9addacc
--- /dev/null
+++ b/tests/run-pass/slice-of-zero-size-elements.rs
@@ -0,0 +1,58 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
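+
+// `foo`/`foo_mut` step an iterator over ZSTs part-way and turn it back into a
+// slice; the resulting slice pointer must never be null.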
+
+// compile-flags: -C debug-assertions
+
+use std::slice;
+
+fn foo<T>(v: &[T]) -> Option<&[T]> {
+    let mut it = v.iter();
+    for _ in 0..5 {
+        let _ = it.next();
+    }
+    Some(it.as_slice())
+}
+
+fn foo_mut<T>(v: &mut [T]) -> Option<&mut [T]> {
+    let mut it = v.iter_mut();
+    for _ in 0..5 {
+        let _ = it.next();
+    }
+    Some(it.into_slice())
+}
+
+pub fn main() {
+    // In a slice of zero-size elements the pointer is meaningless.
+    // Ensure iteration still works even if the pointer is at the end of the address space.
+    let slice: &[()] = unsafe { slice::from_raw_parts(-5isize as *const (), 10) };
+    assert_eq!(slice.len(), 10);
+    assert_eq!(slice.iter().count(), 10);
+
+    // .nth() on the iterator should also behave correctly
+    let mut it = slice.iter();
+    assert!(it.nth(5).is_some());
+    assert_eq!(it.count(), 4);
+
+    // Converting Iter to a slice should never have a null pointer
+    assert!(foo(slice).is_some());
+
+    // Test mutable iterators as well
+    let slice: &mut [()] = unsafe { slice::from_raw_parts_mut(-5isize as *mut (), 10) };
+    assert_eq!(slice.len(), 10);
+    assert_eq!(slice.iter_mut().count(), 10);
+
+    {
+        let mut it = slice.iter_mut();
+        assert!(it.nth(5).is_some());
+        assert_eq!(it.count(), 4);
+    }
+
+    assert!(foo_mut(slice).is_some())
+}
diff --git a/tests/run-pass/small_enum_size_bug.rs b/tests/run-pass/small_enum_size_bug.rs
new file mode 100644
index 0000000000000..7576a97e36adf
--- /dev/null
+++ b/tests/run-pass/small_enum_size_bug.rs
@@ -0,0 +1,14 @@
+#![allow(dead_code)]
+
+enum E {
+    A = 1,
+    B = 2,
+    C = 3,
+}
+
+fn main() {
+    let enone = None::<E>;
+    if let Some(..) = enone {
+        panic!();
+    }
+}
diff --git a/tests/run-pass/specialization.rs b/tests/run-pass/specialization.rs
new file mode 100644
index 0000000000000..13894926d36db
--- /dev/null
+++ b/tests/run-pass/specialization.rs
@@ -0,0 +1,21 @@
+#![feature(specialization)]
+
+trait IsUnit {
+    fn is_unit() -> bool;
+}
+
+impl<T> IsUnit for T {
+    default fn is_unit() -> bool { false }
+}
+
+impl IsUnit for () {
+    fn is_unit() -> bool { true }
+}
+
+fn specialization() -> (bool, bool) {
+    (i32::is_unit(), <()>::is_unit())
+}
+
+fn main() {
+    assert_eq!(specialization(), (false, true));
+}
diff --git a/tests/run-pass/static_memory_modification.rs b/tests/run-pass/static_memory_modification.rs
new file mode 100644
index 0000000000000..a68f727322e29
--- /dev/null
+++ b/tests/run-pass/static_memory_modification.rs
@@ -0,0 +1,8 @@
+static mut X: usize = 5;
+
+fn main() {
+    unsafe {
+        X = 6;
+        assert_eq!(X, 6);
+    }
+}
diff --git a/tests/run-pass/static_mut.rs b/tests/run-pass/static_mut.rs
new file mode 100644
index 0000000000000..be5830698b211
--- /dev/null
+++ b/tests/run-pass/static_mut.rs
@@ -0,0 +1,17 @@
+#![allow(dead_code)]
+
+static mut FOO: i32 = 42;
+static BAR: Foo = Foo(unsafe { &FOO as *const _ });
+
+struct Foo(*const i32);
+
+unsafe impl Sync for Foo {}
+
+fn main() {
+    unsafe {
+        assert_eq!(*BAR.0, 42);
+        FOO = 5;
+        assert_eq!(FOO, 5);
+        assert_eq!(*BAR.0, 5);
+    }
+}
diff --git a/tests/run-pass/std.rs b/tests/run-pass/std.rs
new file mode 100644
index 0000000000000..e0e23812d275e
--- /dev/null
+++ b/tests/run-pass/std.rs
@@ -0,0 +1,33 @@
+use std::cell::{Cell, RefCell};
+use std::rc::Rc;
+use std::sync::Arc;
+
+fn rc_cell() -> Rc<Cell<i32>> {
+    let r = Rc::new(Cell::new(42));
+    let x = r.get();
+    r.set(x + x);
+    r
+}
+
+fn rc_refcell() -> i32 {
+    let r = Rc::new(RefCell::new(42));
+    *r.borrow_mut() += 10;
+    let x = *r.borrow();
+    x
+}
+
+fn arc() -> Arc<i32> {
+    let a = Arc::new(42);
+    a
+}
+
+fn true_assert() {
+    assert_eq!(1, 1);
+}
+
+fn main() {
+    assert_eq!(*arc(), 42);
+    assert_eq!(rc_cell().get(), 84);
+    assert_eq!(rc_refcell(), 52);
+    true_assert();
+}
diff --git a/tests/run-pass/strings.rs b/tests/run-pass/strings.rs
new file mode 100644
index 0000000000000..d5fc80b41f012
--- /dev/null
+++ b/tests/run-pass/strings.rs
@@ -0,0 +1,27 @@
+fn empty() -> &'static str {
+    ""
+}
+
+fn hello() -> &'static str {
+    "Hello, world!"
+}
+
+fn hello_bytes() -> &'static [u8; 13] {
+    b"Hello, world!"
+}
+
+fn hello_bytes_fat() -> &'static [u8] {
+    b"Hello, world!"
+}
+
+fn fat_pointer_on_32_bit() {
+    Some(5).expect("foo");
+}
+
+fn main() {
+    assert_eq!(empty(), "");
+    assert_eq!(hello(), "Hello, world!");
+    assert_eq!(hello_bytes(), b"Hello, world!");
+    assert_eq!(hello_bytes_fat(), b"Hello, world!");
+    fat_pointer_on_32_bit(); // Should run without crashing.
+}
diff --git a/tests/run-pass/sums.rs b/tests/run-pass/sums.rs
new file mode 100644
index 0000000000000..a8dfd5ed66ae7
--- /dev/null
+++ b/tests/run-pass/sums.rs
@@ -0,0 +1,59 @@
+// FIXME(solson): 32-bit mode doesn't test anything currently.
+#![cfg_attr(target_pointer_width = "32", allow(dead_code))]
+
+#[derive(Debug, PartialEq)]
+enum Unit { Unit(()) } // Force non-C-enum representation.
+
+fn return_unit() -> Unit {
+    Unit::Unit(())
+}
+
+#[derive(Debug, PartialEq)]
+enum MyBool { False(()), True(()) } // Force non-C-enum representation.
+
+fn return_true() -> MyBool {
+    MyBool::True(())
+}
+
+fn return_false() -> MyBool {
+    MyBool::False(())
+}
+
+fn return_none() -> Option<i8> {
+    None
+}
+
+fn return_some() -> Option<i8> {
+    Some(42)
+}
+
+fn match_opt_none() -> i8 {
+    let x = None;
+    match x {
+        Some(data) => data,
+        None => 42,
+    }
+}
+
+fn match_opt_some() -> i8 {
+    let x = Some(13);
+    match x {
+        Some(data) => data,
+        None => 20,
+    }
+}
+
+fn two_nones() -> (Option<i16>, Option<i16>) {
+    (None, None)
+}
+
+fn main() {
+    assert_eq!(two_nones(), (None, None));
+    assert_eq!(match_opt_some(), 13);
+    assert_eq!(match_opt_none(), 42);
+    assert_eq!(return_some(), Some(42));
+    assert_eq!(return_none(), None);
+    assert_eq!(return_false(), MyBool::False(()));
+    assert_eq!(return_true(), MyBool::True(()));
+    assert_eq!(return_unit(), Unit::Unit(()));
+}
diff --git a/tests/run-pass/tag-align-dyn-u64.rs b/tests/run-pass/tag-align-dyn-u64.rs
new file mode 100644
index 0000000000000..81c19022ab080
--- /dev/null
+++ b/tests/run-pass/tag-align-dyn-u64.rs
@@ -0,0 +1,37 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
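+
+// The u64 payload of `Tag` sits behind a `u8` field in `Rec`; it must still
+// end up properly aligned for u64.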
+
+#![allow(dead_code)]
+
+use std::mem;
+
+enum Tag<A> {
+    Tag2(A)
+}
+
+struct Rec {
+    c8: u8,
+    t: Tag<u64>
+}
+
+fn mk_rec() -> Rec {
+    return Rec { c8: 0, t: Tag::Tag2(0) };
+}
+
+fn is_u64_aligned(u: &Tag<u64>) -> bool {
+    let p: usize = unsafe { mem::transmute(u) };
+    let u64_align = std::mem::align_of::<u64>();
+    return (p & (u64_align - 1)) == 0;
+}
+
+pub fn main() {
+    let x = mk_rec();
+    assert!(is_u64_aligned(&x.t));
+}
diff --git a/tests/run-pass/thread-local.rs b/tests/run-pass/thread-local.rs
new file mode 100644
index 0000000000000..34aeef23b1ad4
--- /dev/null
+++ b/tests/run-pass/thread-local.rs
@@ -0,0 +1,67 @@
+//ignore-windows
+
+#![feature(libc)]
+extern crate libc;
+
+use std::mem;
+
+pub type Key = libc::pthread_key_t;
+
+static mut RECORD: usize = 0;
+static mut KEYS: [Key; 2] = [0; 2];
+static mut GLOBALS: [u64; 2] = [1, 0];
+
+// This serves as a canary: if TLS dtors are not run properly, it will not get
+// deallocated, making the test fail.
+static mut CANARY: *mut u64 = 0 as *mut _;
+
+pub unsafe fn create(dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
+    let mut key = 0;
+    assert_eq!(libc::pthread_key_create(&mut key, mem::transmute(dtor)), 0);
+    key
+}
+
+pub unsafe fn set(key: Key, value: *mut u8) {
+    let r = libc::pthread_setspecific(key, value as *mut _);
+    assert_eq!(r, 0);
+}
+
+pub fn record(r: usize) {
+    assert!(r < 10);
+    unsafe { RECORD = RECORD*10 + r };
+}
+
+unsafe extern fn dtor(mut ptr: *mut u64) {
+    assert!(CANARY != 0 as *mut _); // make sure we do not get run too often
+    let val = *ptr;
+
+    let which_key = GLOBALS.iter().position(|global| global as *const _ == ptr).expect("Should find my global");
+    record(which_key);
+
+    if val > 0 {
+        *ptr = val - 1;
+        set(KEYS[which_key], ptr as *mut _);
+    }
+
+    // Check if the record matches what we expect. If yes, clear the canary.
+    // If the record is wrong, the canary will never get cleared, leading to a leak -> test fails.
+    // If the record is incomplete (i.e., more dtor calls happen), the check at the beginning of this function will fail -> test fails.
+    // The correct sequence is: first key 0, then key 1, then key 0.
+    if RECORD == 0_1_0 {
+        drop(Box::from_raw(CANARY));
+        CANARY = 0 as *mut _;
+    }
+}
+
+fn main() {
+    unsafe {
+        create(None); // check that the no-dtor case works
+
+        // Initialize the keys we use to check destructor ordering
+        for (key, global) in KEYS.iter_mut().zip(GLOBALS.iter()) {
+            *key = create(Some(mem::transmute(dtor as unsafe extern fn(*mut u64))));
+            set(*key, global as *const _ as *mut _);
+        }
+
+        // Initialize canary
+        CANARY = Box::into_raw(Box::new(0u64));
+    }
+}
diff --git a/tests/run-pass/too-large-primval-write-problem.rs b/tests/run-pass/too-large-primval-write-problem.rs
new file mode 100644
index 0000000000000..1bbe45277c43f
--- /dev/null
+++ b/tests/run-pass/too-large-primval-write-problem.rs
@@ -0,0 +1,23 @@
+// PrimVals in Miri are represented with 8 bytes (u64); at the time of writing,
+// `-x` will sign-extend into the entire 8 bytes. Then, if you tried to write
+// the `-x` into something smaller than 8 bytes, like a 4-byte pointer, it
+// would crash in byteorder crate code that assumed only the low 4 bytes would
+// be set. Actually, we were masking properly for everything except pointers
+// before I fixed it, so this was probably impossible to reproduce on 64-bit.
+//
+// This is just intended as a regression test to make sure we don't reintroduce
+// this problem.
+
+#[cfg(target_pointer_width = "32")]
+fn main() {
+    use std::mem::transmute;
+
+    // Make the weird PrimVal.
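+    // (`-x` sign-extends into all 8 bytes of the PrimVal, per the comment above.)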
+    let x = 1i32;
+    let bad = unsafe { transmute::<i32, *const i32>(-x) };
+
+    // Force it through the Memory::write_primval code.
+    Box::new(bad);
+}
+
+#[cfg(not(target_pointer_width = "32"))]
+fn main() {}
diff --git a/tests/run-pass/traits.rs b/tests/run-pass/traits.rs
new file mode 100644
index 0000000000000..e3d93957fd96b
--- /dev/null
+++ b/tests/run-pass/traits.rs
@@ -0,0 +1,31 @@
+struct Struct(i32);
+
+trait Trait {
+    fn method(&self);
+}
+
+impl Trait for Struct {
+    fn method(&self) {
+        assert_eq!(self.0, 42);
+    }
+}
+
+struct Foo<T>(T);
+
+fn main() {
+    let y: &Trait = &Struct(42);
+    y.method();
+    let x: Foo<Struct> = Foo(Struct(42));
+    let y: &Foo<Struct> = &x;
+    y.0.method();
+    /*
+    let x: Box<Fn(i32) -> i32> = Box::new(|x| x * 2);
+    assert_eq!(x(21), 42);
+    let mut i = 5;
+    {
+        let mut x: Box<FnMut()> = Box::new(|| i *= 2);
+        x(); x();
+    }
+    assert_eq!(i, 20);
+    */
+}
diff --git a/tests/run-pass/trivial.rs b/tests/run-pass/trivial.rs
new file mode 100644
index 0000000000000..891d115206561
--- /dev/null
+++ b/tests/run-pass/trivial.rs
@@ -0,0 +1,11 @@
+fn empty() {}
+
+fn unit_var() {
+    let x = ();
+    x
+}
+
+fn main() {
+    empty();
+    unit_var();
+}
diff --git a/tests/run-pass/try-operator-custom.rs b/tests/run-pass/try-operator-custom.rs
new file mode 100644
index 0000000000000..3b447f36ece1e
--- /dev/null
+++ b/tests/run-pass/try-operator-custom.rs
@@ -0,0 +1,13 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+    assert!(Ok::<i32, i32>(42) == Ok(42));
+}
diff --git a/tests/run-pass/tuple_like_enum_variant_constructor.rs b/tests/run-pass/tuple_like_enum_variant_constructor.rs
new file mode 100644
index 0000000000000..5cf91b3f4d194
--- /dev/null
+++ b/tests/run-pass/tuple_like_enum_variant_constructor.rs
@@ -0,0 +1,3 @@
+fn main() {
+    assert_eq!(Some(42).map(Some), Some(Some(42)));
+}
diff --git a/tests/run-pass/tuple_like_enum_variant_constructor_pointer_opt.rs b/tests/run-pass/tuple_like_enum_variant_constructor_pointer_opt.rs
new file mode 100644
index 0000000000000..fb57d4f4c1652
--- /dev/null
+++ b/tests/run-pass/tuple_like_enum_variant_constructor_pointer_opt.rs
@@ -0,0 +1,4 @@
+fn main() {
+    let x = 5;
+    assert_eq!(Some(&x).map(Some), Some(Some(&x)));
+}
diff --git a/tests/run-pass/tuple_like_enum_variant_constructor_struct_pointer_opt.rs b/tests/run-pass/tuple_like_enum_variant_constructor_struct_pointer_opt.rs
new file mode 100644
index 0000000000000..44441ed1d36c8
--- /dev/null
+++ b/tests/run-pass/tuple_like_enum_variant_constructor_struct_pointer_opt.rs
@@ -0,0 +1,33 @@
+#[derive(Copy, Clone, PartialEq, Debug)]
+struct A<'a> {
+    x: i32,
+    y: &'a i32,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+struct B<'a>(i32, &'a i32);
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+enum C<'a> {
+    Value(i32, &'a i32),
+    #[allow(dead_code)]
+    NoValue,
+}
+
+fn main() {
+    let x = 5;
+    let a = A { x: 99, y: &x };
+    assert_eq!(Some(a).map(Some), Some(Some(a)));
+    let f = B;
+    assert_eq!(Some(B(42, &x)), Some(f(42, &x)));
+    // the following doesn't compile :(
+    //let f: for<'a> fn(i32, &'a i32) -> B<'a> = B;
+    //assert_eq!(Some(B(42, &x)), Some(f(42, &x)));
+    assert_eq!(B(42, &x), foo(&x, B));
+    let f = C::Value;
+    assert_eq!(C::Value(42, &x), f(42, &x));
+}
+
+fn foo<'a, F: Fn(i32, &'a i32) -> B<'a>>(i: &'a i32, f: F) -> B<'a> {
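+    // `f` is the tuple-struct constructor `B`, passed in as an ordinary `Fn` value.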
+    f(42, i)
+}
diff --git a/tests/run-pass/tuple_like_struct_constructor.rs b/tests/run-pass/tuple_like_struct_constructor.rs
new file mode 100644
index 0000000000000..05e8893de1787
--- /dev/null
+++ b/tests/run-pass/tuple_like_struct_constructor.rs
@@ -0,0 +1,5 @@
+fn main() {
+    #[derive(PartialEq, Eq, Debug)]
+    struct A(i32);
+    assert_eq!(Some(42).map(A), Some(A(42)));
+}
diff --git a/tests/run-pass/union-overwrite.rs b/tests/run-pass/union-overwrite.rs
new file mode 100644
index 0000000000000..df2ff6e51a593
--- /dev/null
+++ b/tests/run-pass/union-overwrite.rs
@@ -0,0 +1,81 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(untagged_unions)]
+#![allow(unions_with_drop_fields)]
+
+#[repr(C)]
+struct Pair<T, U>(T, U);
+#[repr(C)]
+struct Triple<T>(T, T, T);
+
+#[repr(C)]
+union U<A, B> {
+    a: Pair<A, A>,
+    b: B,
+}
+
+#[repr(C)]
+union W<A, B> {
+    a: A,
+    b: B,
+}
+
+#[cfg(target_endian = "little")]
+unsafe fn check() {
+    let mut u = U::<u8, u16> { b: 0xDE_DE };
+    u.a.0 = 0xBE;
+    assert_eq!(u.b, 0xDE_BE);
+
+    let mut u = U::<u16, u32> { b: 0xDEAD_DEAD };
+    u.a.0 = 0xBEEF;
+    assert_eq!(u.b, 0xDEAD_BEEF);
+
+    let mut u = U::<u32, u64> { b: 0xDEADBEEF_DEADBEEF };
+    u.a.0 = 0xBAADF00D;
+    assert_eq!(u.b, 0xDEADBEEF_BAADF00D);
+
+    let mut w = W::<Pair<Triple<u8>, u8>, u32> { b: 0xDEAD_DEAD };
+    w.a.0 = Triple(0, 0, 0);
+    assert_eq!(w.b, 0xDE00_0000);
+
+    let mut w = W::<Pair<u8, Triple<u8>>, u32> { b: 0xDEAD_DEAD };
+    w.a.1 = Triple(0, 0, 0);
+    assert_eq!(w.b, 0x0000_00AD);
+}
+
+#[cfg(target_endian = "big")]
+unsafe fn check() {
+    let mut u = U::<u8, u16> { b: 0xDE_DE };
+    u.a.0 = 0xBE;
+    assert_eq!(u.b, 0xBE_DE);
+
+    let mut u = U::<u16, u32> { b: 0xDEAD_DEAD };
+    u.a.0 = 0xBEEF;
+    assert_eq!(u.b, 0xBEEF_DEAD);
+
+    let mut u = U::<u32, u64> { b: 0xDEADBEEF_DEADBEEF };
+    u.a.0 = 0xBAADF00D;
+    assert_eq!(u.b, 0xBAADF00D_DEADBEEF);
+
+    let mut w = W::<Pair<Triple<u8>, u8>, u32> { b: 0xDEAD_DEAD };
+    w.a.0 = Triple(0, 0, 0);
+    assert_eq!(w.b, 0x0000_00AD);
+
+    let mut w = W::<Pair<u8, Triple<u8>>, u32> { b: 0xDEAD_DEAD };
+    w.a.1 = Triple(0, 0, 0);
+    assert_eq!(w.b, 0xDE00_0000);
+}
+
+fn main() {
+    unsafe {
+        check();
+    }
+}
diff --git a/tests/run-pass/union.rs b/tests/run-pass/union.rs
new file mode 100644
index 0000000000000..342c94f3d4a34
--- /dev/null
+++ b/tests/run-pass/union.rs
@@ -0,0 +1,88 @@
+#![feature(untagged_unions)]
+#![allow(dead_code, unused_variables)]
+
+fn main() {
+    a();
+    b();
+    c();
+    d();
+}
+
+fn a() {
+    union U {
+        f1: u32,
+        f2: f32,
+    }
+    let mut u = U { f1: 1 };
+    unsafe {
+        let b1 = &mut u.f1;
+        *b1 = 5;
+    }
+    assert_eq!(unsafe { u.f1 }, 5);
+}
+
+fn b() {
+    struct S {
+        x: u32,
+        y: u32,
+    }
+
+    union U {
+        s: S,
+        both: u64,
+    }
+    let mut u = U { s: S { x: 1, y: 2 } };
+    unsafe {
+        let bx = &mut u.s.x;
+        let by = &mut u.s.y;
+        *bx = 5;
+        *by = 10;
+    }
+    assert_eq!(unsafe { u.s.x }, 5);
+    assert_eq!(unsafe { u.s.y }, 10);
+}
+
+fn c() {
+    #[repr(u32)]
+    enum Tag { I, F }
+
+    #[repr(C)]
+    union U {
+        i: i32,
+        f: f32,
+    }
+
+    #[repr(C)]
+    struct Value {
+        tag: Tag,
+        u: U,
+    }
+
+    fn is_zero(v: Value) -> bool {
+        unsafe {
+            match v {
+                Value { tag: Tag::I, u: U { i: 0 } } => true,
+                Value { tag: Tag::F, u: U { f } } => f == 0.0,
+                _ => false,
+            }
+        }
+    }
+    assert!(is_zero(Value { tag: Tag::I, u: U { i: 0 }}));
+    assert!(is_zero(Value { tag: Tag::F, u: U { f: 0.0 }}));
+    assert!(!is_zero(Value { tag: Tag::I, u: U { i: 1 }}));
+    assert!(!is_zero(Value { tag: Tag::F, u: U { f: 42.0 }}));
+}
+
+fn d() {
+    union MyUnion {
+        f1: u32,
+        f2: f32,
+    }
+    let u = MyUnion { f1: 10 };
+    unsafe {
+        match u {
+            MyUnion { f1: 10 } => { }
+            MyUnion { f2 } => { panic!("foo"); }
+        }
+    }
+}
diff --git a/tests/run-pass/unique-send.rs b/tests/run-pass/unique-send.rs
new file mode 100644
index 0000000000000..7644da08e4afa
--- /dev/null
+++ b/tests/run-pass/unique-send.rs
@@ -0,0 +1,20 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(box_syntax)]
+
+use std::sync::mpsc::channel;
+
+pub fn main() {
+    let (tx, rx) = channel::<Box<isize>>();
+    tx.send(box 100).unwrap();
+    let v = rx.recv().unwrap();
+    assert_eq!(v, box 100);
+}
diff --git a/tests/run-pass/vec-matching-fold.rs b/tests/run-pass/vec-matching-fold.rs
new file mode 100644
index 0000000000000..ac80a4211ada6
--- /dev/null
+++ b/tests/run-pass/vec-matching-fold.rs
@@ -0,0 +1,58 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+#![feature(advanced_slice_patterns)]
+#![feature(slice_patterns)]
+
+use std::fmt::Debug;
+
+fn foldl<T, U, F>(values: &[T],
+                  initial: U,
+                  mut function: F)
+                  -> U where
+    U: Clone + Debug, T: Debug,
+    F: FnMut(U, &T) -> U,
+{
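+    // Recursive fold via slice patterns: split off `head`, recurse on `tail`.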
+    match values {
+        &[ref head, ref tail..] =>
+            foldl(tail, function(initial, head), function),
+        &[] => {
+            // FIXME: call guards
+            let res = initial.clone(); res
+        }
+    }
+}
+
+fn foldr<T, U, F>(values: &[T],
+                  initial: U,
+                  mut function: F)
+                  -> U where
+    U: Clone,
+    F: FnMut(&T, U) -> U,
+{
+    match values {
+        &[ref head.., ref tail] =>
+            foldr(head, function(tail, initial), function),
+        &[] => {
+            // FIXME: call guards
+            let res = initial.clone(); res
+        }
+    }
+}
+
+pub fn main() {
+    let x = &[1, 2, 3, 4, 5];
+
+    let product = foldl(x, 1, |a, b| a * *b);
+    assert_eq!(product, 120);
+
+    let sum = foldr(x, 0, |a, b| *a + b);
+    assert_eq!(sum, 15);
+}
diff --git a/tests/run-pass/write-bytes.rs b/tests/run-pass/write-bytes.rs
new file mode 100644
index 0000000000000..7c9a38fca696d
--- /dev/null
+++ b/tests/run-pass/write-bytes.rs
@@ -0,0 +1,45 @@
+#[repr(C)]
+#[derive(Copy, Clone)]
+struct Foo {
+    a: u64,
+    b: u64,
+    c: u64,
+}
+
+fn main() {
+    const LENGTH: usize = 10;
+    let mut v: [u64; LENGTH] = [0; LENGTH];
+
+    for idx in 0..LENGTH {
+        assert_eq!(v[idx], 0);
+    }
+
+    unsafe {
+        let p = v.as_mut_ptr();
+        ::std::ptr::write_bytes(p, 0xab, LENGTH);
+    }
+
+    for idx in 0..LENGTH {
+        assert_eq!(v[idx], 0xabababababababab);
+    }
+
+    // -----
+
+    let mut w: [Foo; LENGTH] = [Foo { a: 0, b: 0, c: 0 }; LENGTH];
+    for idx in 0..LENGTH {
+        assert_eq!(w[idx].a, 0);
+        assert_eq!(w[idx].b, 0);
+        assert_eq!(w[idx].c, 0);
+    }
+
+    unsafe {
+        let p = w.as_mut_ptr();
+        ::std::ptr::write_bytes(p, 0xcd, LENGTH);
+    }
+
+    for idx in 0..LENGTH {
+        assert_eq!(w[idx].a, 0xcdcdcdcdcdcdcdcd);
+        assert_eq!(w[idx].b, 0xcdcdcdcdcdcdcdcd);
+        assert_eq!(w[idx].c, 0xcdcdcdcdcdcdcdcd);
+    }
+}
diff --git a/tests/run-pass/zero-sized-binary-heap-push.rs b/tests/run-pass/zero-sized-binary-heap-push.rs
new file mode 100644
index 0000000000000..63a0d65f017de
--- /dev/null
+++ b/tests/run-pass/zero-sized-binary-heap-push.rs
@@ -0,0 +1,28 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
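+
+// BinaryHeap of zero-sized elements: push, len, iteration, and clear must all
+// behave as they do for ordinary elements.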
+
+use std::collections::BinaryHeap;
+use std::iter::Iterator;
+
+fn main() {
+    const N: usize = 8;
+
+    for len in 0..N {
+        let mut tester = BinaryHeap::with_capacity(len);
+        assert_eq!(tester.len(), 0);
+        assert!(tester.capacity() >= len);
+        for _ in 0..len {
+            tester.push(());
+        }
+        assert_eq!(tester.len(), len);
+        assert_eq!(tester.iter().count(), len);
+        tester.clear();
+    }
+}
diff --git a/tests/run-pass/zst.rs b/tests/run-pass/zst.rs
new file mode 100644
index 0000000000000..c1c88875c5c80
--- /dev/null
+++ b/tests/run-pass/zst.rs
@@ -0,0 +1,18 @@
+#[derive(PartialEq, Debug)]
+struct A;
+
+fn zst_ret() -> A {
+    A
+}
+
+fn use_zst() -> A {
+    let a = A;
+    a
+}
+
+fn main() {
+    assert_eq!(zst_ret(), A);
+    assert_eq!(use_zst(), A);
+    let x = 42 as *mut ();
+    unsafe { *x = (); }
+}
diff --git a/tests/run-pass/zst_box.rs b/tests/run-pass/zst_box.rs
new file mode 100644
index 0000000000000..12138be5af976
--- /dev/null
+++ b/tests/run-pass/zst_box.rs
@@ -0,0 +1,8 @@
+fn main() {
+    let x = Box::new(());
+    let y = Box::new(());
+    drop(y);
+    let z = Box::new(());
+    drop(x);
+    drop(z);
+}
diff --git a/tests/run-pass/zst_variant_drop.rs b/tests/run-pass/zst_variant_drop.rs
new file mode 100644
index 0000000000000..a76f64ce29df7
--- /dev/null
+++ b/tests/run-pass/zst_variant_drop.rs
@@ -0,0 +1,23 @@
+struct Foo;
+impl Drop for Foo {
+    fn drop(&mut self) {
+        unsafe {
+            FOO = true;
+        }
+    }
+}
+
+static mut FOO: bool = false;
+
+enum Bar {
+    A(Box<i32>),
+    B(Foo),
+}
+
+fn main() {
+    assert!(unsafe { !FOO });
+    drop(Bar::A(Box::new(42)));
+    assert!(unsafe { !FOO });
+    drop(Bar::B(Foo));
+    assert!(unsafe { FOO });
+}