Auto merge of #149474 - RalfJung:miri, r=RalfJung

miri subtree update

Subtree update of `miri` to 0749929920.

Created using https://github.com/rust-lang/josh-sync.

r? `@ghost`
This commit is contained in:
bors 2025-11-30 10:27:52 +00:00
commit 03ce87dfb2
38 changed files with 1497 additions and 1253 deletions

View file

@ -31,13 +31,13 @@ jobs:
os: ubuntu-24.04-arm
multiarch: armhf
gcc_cross: arm-linux-gnueabihf
- host_target: riscv64gc-unknown-linux-gnu
os: ubuntu-latest
multiarch: riscv64
gcc_cross: riscv64-linux-gnu
qemu: true
# Ubuntu mirrors are not reliable enough for these architectures
# (see <https://bugs.launchpad.net/ubuntu/+bug/2130309>).
# - host_target: riscv64gc-unknown-linux-gnu
# os: ubuntu-latest
# multiarch: riscv64
# gcc_cross: riscv64-linux-gnu
# qemu: true
# - host_target: s390x-unknown-linux-gnu
# os: ubuntu-latest
# multiarch: s390x
@ -59,13 +59,6 @@ jobs:
HOST_TARGET: ${{ matrix.host_target }}
steps:
- uses: actions/checkout@v4
- name: apt update
if: ${{ startsWith(matrix.os, 'ubuntu') }}
# The runners seem to have outdated apt repos sometimes
run: sudo apt update
- name: install qemu
if: ${{ matrix.qemu }}
run: sudo apt install qemu-user qemu-user-binfmt
- name: install multiarch
if: ${{ matrix.multiarch != '' }}
run: |
@ -75,10 +68,13 @@ jobs:
sudo dpkg --add-architecture ${{ matrix.multiarch }}
# Ubuntu Ports often has outdated mirrors so try a few times to get the apt repo
for TRY in $(seq 3); do
{ sudo apt update && break; } || sleep 30
{ sudo apt update && break; } || sleep 60
done
# Install needed packages
sudo apt install $(echo "libatomic1: zlib1g-dev:" | sed 's/:/:${{ matrix.multiarch }}/g')
- name: install qemu
if: ${{ matrix.qemu }}
run: sudo apt install qemu-user qemu-user-binfmt
- uses: ./.github/workflows/setup
with:
toolchain_flags: "--host ${{ matrix.host_target }}"

View file

@ -959,7 +959,6 @@ dependencies = [
"serde_json",
"smallvec",
"tempfile",
"tikv-jemalloc-sys",
"ui_test",
]
@ -1504,16 +1503,6 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "tikv-jemalloc-sys"
version = "0.6.1+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd8aa5b2ab86a2cefa406d889139c162cbb230092f7d1d7cbc1716405d852a3b"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "tinystr"
version = "0.8.2"

View file

@ -41,7 +41,6 @@ nix = { version = "0.30.1", features = ["mman", "ptrace", "signal"], optional =
ipc-channel = { version = "0.20.0", optional = true }
capstone = { version = "0.13", optional = true }
# FIXME(genmc,macos): Add `target_os = "macos"` once https://github.com/dtolnay/cxx/issues/1535 is fixed.
[target.'cfg(all(target_os = "linux", target_pointer_width = "64", target_endian = "little"))'.dependencies]
genmc-sys = { path = "./genmc-sys/", version = "0.1.0", optional = true }

View file

@ -496,8 +496,6 @@ to Miri failing to detect cases of undefined behavior in a program.
of Rust will be stricter than Tree Borrows. In other words, if you use Tree Borrows,
even if your code is accepted today, it might be declared UB in the future.
This is much less likely with Stacked Borrows.
Using Tree Borrows currently implies `-Zmiri-strict-provenance` because integer-to-pointer
casts are not supported in this mode, but that may change in the future.
* `-Zmiri-tree-borrows-no-precise-interior-mut` makes Tree Borrows
track interior mutable data on the level of references instead of on the
byte-level as is done by default. Therefore, with this flag, Tree

View file

@ -28,7 +28,7 @@ mod downloading {
/** The GenMC repository that we get our commit from.
pub(crate) const GENMC_GITHUB_URL: &str = "https://github.com/MPI-SWS/genmc.git";
/// The GenMC commit we depend on. It must be available on the specified GenMC repository.
pub(crate) const GENMC_COMMIT: &str = "aa10ed65117c3291524efc19253b5d443a4602ac";
pub(crate) const GENMC_COMMIT: &str = "22d3d0b44dedb4e8e1aae3330e546465e4664529";
/// Ensure that a local GenMC repo is present and set to the correct commit.
/// Return the path of the GenMC repo and whether the checked out commit was changed.
@ -178,14 +178,8 @@ fn compile_cpp_dependencies(genmc_path: &Path, always_configure: bool) {
// These are all the C++ files we need to compile; this list needs to be updated if more C++ files are added to Miri.
// We use absolute paths since relative paths can confuse IDEs when attempting to go-to-source on a path in a compiler error.
let cpp_files_base_path = Path::new("cpp/src/");
let cpp_files = [
"MiriInterface/EventHandling.cpp",
"MiriInterface/Exploration.cpp",
"MiriInterface/Mutex.cpp",
"MiriInterface/Setup.cpp",
"MiriInterface/ThreadManagement.cpp",
]
.map(|file| std::path::absolute(cpp_files_base_path.join(file)).unwrap());
let cpp_files = ["MiriInterface/Exploration.cpp", "MiriInterface/Setup.cpp"]
.map(|file| std::path::absolute(cpp_files_base_path.join(file)).unwrap());
let mut bridge = cxx_build::bridge("src/lib.rs");
// FIXME(genmc,cmake): Remove once the GenMC debug setting is available in the config.h file.

View file

@ -125,8 +125,11 @@ struct MiriGenmcShim : private GenMCDriver {
void handle_fence(ThreadId thread_id, MemOrdering ord);
/**** Memory (de)allocation ****/
auto handle_malloc(ThreadId thread_id, uint64_t size, uint64_t alignment) -> uint64_t;
auto handle_free(ThreadId thread_id, uint64_t address) -> bool;
/** Returns null on success, or an error string if an error occurs. */
auto handle_free(ThreadId thread_id, uint64_t address) -> std::unique_ptr<std::string>;
/**** Thread management ****/
void handle_thread_create(ThreadId thread_id, ThreadId parent_id);

View file

@ -1,265 +0,0 @@
/** This file contains functionality related to handling events encountered
* during an execution, such as loads, stores or memory (de)allocation. */
#include "MiriInterface.hpp"
// CXX.rs generated headers:
#include "genmc-sys/src/lib.rs.h"
// GenMC headers:
#include "ADT/value_ptr.hpp"
#include "ExecutionGraph/EventLabel.hpp"
#include "ExecutionGraph/LoadAnnotation.hpp"
#include "Runtime/InterpreterEnumAPI.hpp"
#include "Static/ModuleID.hpp"
#include "Support/ASize.hpp"
#include "Support/Error.hpp"
#include "Support/Logger.hpp"
#include "Support/MemAccess.hpp"
#include "Support/RMWOps.hpp"
#include "Support/SAddr.hpp"
#include "Support/SVal.hpp"
#include "Support/ThreadInfo.hpp"
#include "Support/Verbosity.hpp"
#include "Verification/GenMCDriver.hpp"
#include "Verification/MemoryModel.hpp"
// C++ headers:
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
/**** Blocking instructions ****/
void MiriGenmcShim::handle_assume_block(ThreadId thread_id, AssumeType assume_type) {
BUG_ON(getExec().getGraph().isThreadBlocked(thread_id));
GenMCDriver::handleAssume(nullptr, inc_pos(thread_id), assume_type);
}
/**** Memory access handling ****/
[[nodiscard]] auto MiriGenmcShim::handle_load(
ThreadId thread_id,
uint64_t address,
uint64_t size,
MemOrdering ord,
GenmcScalar old_val
) -> LoadResult {
// `type` is only used for printing.
const auto type = AType::Unsigned;
const auto ret = handle_load_reset_if_none<EventLabel::EventLabelKind::Read>(
thread_id,
GenmcScalarExt::try_to_sval(old_val),
ord,
SAddr(address),
ASize(size),
type
);
if (const auto* err = std::get_if<VerificationError>(&ret))
return LoadResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
if (ret_val == nullptr)
ERROR("Unimplemented: load returned unexpected result.");
return LoadResultExt::from_value(*ret_val);
}
[[nodiscard]] auto MiriGenmcShim::handle_store(
ThreadId thread_id,
uint64_t address,
uint64_t size,
GenmcScalar value,
GenmcScalar old_val,
MemOrdering ord
) -> StoreResult {
const auto pos = inc_pos(thread_id);
const auto ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::Write>(
nullptr,
pos,
GenmcScalarExt::try_to_sval(old_val),
ord,
SAddr(address),
ASize(size),
/* type */ AType::Unsigned, // `type` is only used for printing.
GenmcScalarExt::to_sval(value),
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&ret))
return StoreResultExt::from_error(format_error(*err));
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(
nullptr == is_coherence_order_maximal_write,
"Unimplemented: Store returned unexpected result."
);
return StoreResultExt::ok(*is_coherence_order_maximal_write);
}
void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
const auto pos = inc_pos(thread_id);
GenMCDriver::handleFence(nullptr, pos, ord, EventDeps());
}
[[nodiscard]] auto MiriGenmcShim::handle_read_modify_write(
ThreadId thread_id,
uint64_t address,
uint64_t size,
RMWBinOp rmw_op,
MemOrdering ordering,
GenmcScalar rhs_value,
GenmcScalar old_val
) -> ReadModifyWriteResult {
// NOTE: Both the store and load events should get the same `ordering`, it should not be split
// into a load and a store component. This means we can have for example `AcqRel` loads and
// stores, but this is intended for RMW operations.
// Somewhat confusingly, the GenMC term for RMW read/write labels is
// `FaiRead` and `FaiWrite`.
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::FaiRead>(
thread_id,
GenmcScalarExt::try_to_sval(old_val),
ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
rmw_op,
GenmcScalarExt::to_sval(rhs_value),
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&load_ret))
return ReadModifyWriteResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&load_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
if (nullptr == ret_val) {
ERROR("Unimplemented: read-modify-write returned unexpected result.");
}
const auto read_old_val = *ret_val;
const auto new_value =
executeRMWBinOp(read_old_val, GenmcScalarExt::to_sval(rhs_value), size, rmw_op);
const auto storePos = inc_pos(thread_id);
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::FaiWrite>(
nullptr,
storePos,
GenmcScalarExt::try_to_sval(old_val),
ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
new_value
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
return ReadModifyWriteResultExt::from_error(format_error(*err));
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(
nullptr == is_coherence_order_maximal_write,
"Unimplemented: RMW store returned unexpected result."
);
return ReadModifyWriteResultExt::ok(
/* old_value: */ read_old_val,
new_value,
*is_coherence_order_maximal_write
);
}
[[nodiscard]] auto MiriGenmcShim::handle_compare_exchange(
ThreadId thread_id,
uint64_t address,
uint64_t size,
GenmcScalar expected_value,
GenmcScalar new_value,
GenmcScalar old_val,
MemOrdering success_ordering,
MemOrdering fail_load_ordering,
bool can_fail_spuriously
) -> CompareExchangeResult {
// NOTE: Both the store and load events should get the same `ordering`, it should not be split
// into a load and a store component. This means we can have for example `AcqRel` loads and
// stores, but this is intended for CAS operations.
// FIXME(GenMC): properly handle failure memory ordering.
auto expectedVal = GenmcScalarExt::to_sval(expected_value);
auto new_val = GenmcScalarExt::to_sval(new_value);
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::CasRead>(
thread_id,
GenmcScalarExt::try_to_sval(old_val),
success_ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
expectedVal,
new_val
);
if (const auto* err = std::get_if<VerificationError>(&load_ret))
return CompareExchangeResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&load_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(nullptr == ret_val, "Unimplemented: load returned unexpected result.");
const auto read_old_val = *ret_val;
if (read_old_val != expectedVal)
return CompareExchangeResultExt::failure(read_old_val);
// FIXME(GenMC): Add support for modelling spurious failures.
const auto storePos = inc_pos(thread_id);
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::CasWrite>(
nullptr,
storePos,
GenmcScalarExt::try_to_sval(old_val),
success_ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
new_val
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
return CompareExchangeResultExt::from_error(format_error(*err));
const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(
nullptr == is_coherence_order_maximal_write,
"Unimplemented: compare-exchange store returned unexpected result."
);
return CompareExchangeResultExt::success(read_old_val, *is_coherence_order_maximal_write);
}
/**** Memory (de)allocation ****/
auto MiriGenmcShim::handle_malloc(ThreadId thread_id, uint64_t size, uint64_t alignment)
-> uint64_t {
const auto pos = inc_pos(thread_id);
// These are only used for printing and features Miri-GenMC doesn't support (yet).
const auto storage_duration = StorageDuration::SD_Heap;
// Volatile, as opposed to "persistent" (i.e., non-volatile memory that persists over reboots)
const auto storage_type = StorageType::ST_Volatile;
const auto address_space = AddressSpace::AS_User;
const SVal ret_val = GenMCDriver::handleMalloc(
nullptr,
pos,
size,
alignment,
storage_duration,
storage_type,
address_space,
EventDeps()
);
return ret_val.get();
}
auto MiriGenmcShim::handle_free(ThreadId thread_id, uint64_t address) -> bool {
const auto pos = inc_pos(thread_id);
GenMCDriver::handleFree(nullptr, pos, SAddr(address), EventDeps());
// FIXME(genmc): use returned error from `handleFree` once implemented in GenMC.
return getResult().status.has_value();
}

View file

@ -1,4 +1,5 @@
/** This file contains functionality related to exploration, such as scheduling. */
/** This file contains functionality related to exploration events
* such as loads, stores and memory (de)allocation. */
#include "MiriInterface.hpp"
@ -6,13 +7,32 @@
#include "genmc-sys/src/lib.rs.h"
// GenMC headers:
#include "ADT/value_ptr.hpp"
#include "ExecutionGraph/EventLabel.hpp"
#include "ExecutionGraph/LoadAnnotation.hpp"
#include "Runtime/InterpreterEnumAPI.hpp"
#include "Static/ModuleID.hpp"
#include "Support/ASize.hpp"
#include "Support/Error.hpp"
#include "Support/Logger.hpp"
#include "Support/MemAccess.hpp"
#include "Support/RMWOps.hpp"
#include "Support/SAddr.hpp"
#include "Support/SVal.hpp"
#include "Support/ThreadInfo.hpp"
#include "Support/Verbosity.hpp"
#include "Verification/GenMCDriver.hpp"
#include "Verification/MemoryModel.hpp"
// C++ headers:
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
/** Scheduling */
auto MiriGenmcShim::schedule_next(
const int curr_thread_id,
@ -41,8 +61,6 @@ auto MiriGenmcShim::schedule_next(
);
}
/**** Execution start/end handling ****/
void MiriGenmcShim::handle_execution_start() {
threads_action_.clear();
threads_action_.push_back(Action(ActionKind::Load, Event::getInit()));
@ -50,9 +68,230 @@ void MiriGenmcShim::handle_execution_start() {
}
auto MiriGenmcShim::handle_execution_end() -> std::unique_ptr<std::string> {
// FIXME(genmc): add error handling once GenMC returns an error here.
GenMCDriver::handleExecutionEnd();
return {};
auto ret = GenMCDriver::handleExecutionEnd();
return ret.has_value() ? format_error(*ret) : nullptr;
}
/**** Blocking instructions ****/
void MiriGenmcShim::handle_assume_block(ThreadId thread_id, AssumeType assume_type) {
BUG_ON(getExec().getGraph().isThreadBlocked(thread_id));
GenMCDriver::handleAssume(nullptr, inc_pos(thread_id), assume_type);
}
/**** Memory access handling ****/
[[nodiscard]] auto MiriGenmcShim::handle_load(
ThreadId thread_id,
uint64_t address,
uint64_t size,
MemOrdering ord,
GenmcScalar old_val
) -> LoadResult {
// `type` is only used for printing.
const auto type = AType::Unsigned;
const auto ret = handle_load_reset_if_none<EventLabel::EventLabelKind::Read>(
thread_id,
GenmcScalarExt::try_to_sval(old_val),
ord,
SAddr(address),
ASize(size),
type
);
if (const auto* err = std::get_if<VerificationError>(&ret))
return LoadResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(!ret_val, "Unimplemented: load returned unexpected result.");
return LoadResultExt::from_value(*ret_val);
}
[[nodiscard]] auto MiriGenmcShim::handle_store(
ThreadId thread_id,
uint64_t address,
uint64_t size,
GenmcScalar value,
GenmcScalar old_val,
MemOrdering ord
) -> StoreResult {
const auto pos = inc_pos(thread_id);
const auto ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::Write>(
nullptr,
pos,
GenmcScalarExt::try_to_sval(old_val),
ord,
SAddr(address),
ASize(size),
/* type */ AType::Unsigned, // `type` is only used for printing.
GenmcScalarExt::to_sval(value),
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&ret))
return StoreResultExt::from_error(format_error(*err));
const auto* is_co_max = std::get_if<bool>(&ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(!is_co_max, "Unimplemented: Store returned unexpected result.");
return StoreResultExt::ok(*is_co_max);
}
void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
const auto pos = inc_pos(thread_id);
GenMCDriver::handleFence(nullptr, pos, ord, EventDeps());
}
[[nodiscard]] auto MiriGenmcShim::handle_read_modify_write(
ThreadId thread_id,
uint64_t address,
uint64_t size,
RMWBinOp rmw_op,
MemOrdering ordering,
GenmcScalar rhs_value,
GenmcScalar old_val
) -> ReadModifyWriteResult {
// NOTE: Both the store and load events should get the same `ordering`, it should not be split
// into a load and a store component. This means we can have for example `AcqRel` loads and
// stores, but this is intended for RMW operations.
// Somewhat confusingly, the GenMC term for RMW read/write labels is
// `FaiRead` and `FaiWrite`.
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::FaiRead>(
thread_id,
GenmcScalarExt::try_to_sval(old_val),
ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
rmw_op,
GenmcScalarExt::to_sval(rhs_value),
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&load_ret))
return ReadModifyWriteResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&load_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(!ret_val, "Unimplemented: read-modify-write returned unexpected result.");
const auto read_old_val = *ret_val;
const auto new_value =
executeRMWBinOp(read_old_val, GenmcScalarExt::to_sval(rhs_value), size, rmw_op);
const auto storePos = inc_pos(thread_id);
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::FaiWrite>(
nullptr,
storePos,
GenmcScalarExt::try_to_sval(old_val),
ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
new_value
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
return ReadModifyWriteResultExt::from_error(format_error(*err));
const auto* is_co_max = std::get_if<bool>(&store_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(!is_co_max, "Unimplemented: RMW store returned unexpected result.");
return ReadModifyWriteResultExt::ok(
/* old_value: */ read_old_val,
new_value,
*is_co_max
);
}
[[nodiscard]] auto MiriGenmcShim::handle_compare_exchange(
ThreadId thread_id,
uint64_t address,
uint64_t size,
GenmcScalar expected_value,
GenmcScalar new_value,
GenmcScalar old_val,
MemOrdering success_ordering,
MemOrdering fail_load_ordering,
bool can_fail_spuriously
) -> CompareExchangeResult {
// NOTE: Both the store and load events should get the same `ordering`, it should not be split
// into a load and a store component. This means we can have for example `AcqRel` loads and
// stores, but this is intended for CAS operations.
// FIXME(GenMC): properly handle failure memory ordering.
auto expectedVal = GenmcScalarExt::to_sval(expected_value);
auto new_val = GenmcScalarExt::to_sval(new_value);
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::CasRead>(
thread_id,
GenmcScalarExt::try_to_sval(old_val),
success_ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
expectedVal,
new_val
);
if (const auto* err = std::get_if<VerificationError>(&load_ret))
return CompareExchangeResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&load_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(nullptr == ret_val, "Unimplemented: load returned unexpected result.");
const auto read_old_val = *ret_val;
if (read_old_val != expectedVal)
return CompareExchangeResultExt::failure(read_old_val);
// FIXME(GenMC): Add support for modelling spurious failures.
const auto storePos = inc_pos(thread_id);
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::CasWrite>(
nullptr,
storePos,
GenmcScalarExt::try_to_sval(old_val),
success_ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
new_val
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
return CompareExchangeResultExt::from_error(format_error(*err));
const auto* is_co_max = std::get_if<bool>(&store_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(!is_co_max, "Unimplemented: compare-exchange store returned unexpected result.");
return CompareExchangeResultExt::success(read_old_val, *is_co_max);
}
/**** Memory (de)allocation ****/
auto MiriGenmcShim::handle_malloc(ThreadId thread_id, uint64_t size, uint64_t alignment)
-> uint64_t {
const auto pos = inc_pos(thread_id);
// These are only used for printing and features Miri-GenMC doesn't support (yet).
const auto storage_duration = StorageDuration::SD_Heap;
// Volatile, as opposed to "persistent" (i.e., non-volatile memory that persists over reboots)
const auto storage_type = StorageType::ST_Volatile;
const auto address_space = AddressSpace::AS_User;
const SVal ret_val = GenMCDriver::handleMalloc(
nullptr,
pos,
size,
alignment,
storage_duration,
storage_type,
address_space,
EventDeps()
);
return ret_val.get();
}
auto MiriGenmcShim::handle_free(ThreadId thread_id, uint64_t address)
-> std::unique_ptr<std::string> {
auto pos = inc_pos(thread_id);
auto ret = GenMCDriver::handleFree(nullptr, pos, SAddr(address), EventDeps());
return ret.has_value() ? format_error(*ret) : nullptr;
}
/**** Estimation mode result ****/
@ -66,3 +305,187 @@ auto MiriGenmcShim::get_estimation_results() const -> EstimationResult {
.blocked_execs = static_cast<uint64_t>(res.exploredBlocked),
};
}
/** Mutexes */
struct MutexState {
static constexpr SVal UNLOCKED { 0 };
static constexpr SVal LOCKED { 1 };
static constexpr bool isValid(SVal v) {
return v == UNLOCKED || v == LOCKED;
}
};
auto MiriGenmcShim::handle_mutex_lock(ThreadId thread_id, uint64_t address, uint64_t size)
-> MutexLockResult {
// This annotation informs GenMC about the condition required to make this lock call succeed.
// It stands for `value_read_by_load != MUTEX_LOCKED`.
const auto size_bits = size * 8;
const auto annot = std::move(Annotation(
AssumeType::Spinloop,
Annotation::ExprVP(
NeExpr<ModuleID::ID>::create(
// `RegisterExpr` marks the value of the current expression, i.e., the loaded value.
// The `id` is ignored by GenMC; it is only used by the LLI frontend to substitute
// other variables from previous expressions that may be used here.
RegisterExpr<ModuleID::ID>::create(size_bits, /* id */ 0),
ConcreteExpr<ModuleID::ID>::create(size_bits, MutexState::LOCKED)
)
.release()
)
));
// As usual, we need to tell GenMC which value was stored at this location before this atomic
// access, if there previously was a non-atomic initializing access. We set the initial state of
// a mutex to be "unlocked".
const auto old_val = MutexState::UNLOCKED;
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::LockCasRead>(
thread_id,
old_val,
address,
size,
annot,
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&load_ret))
return MutexLockResultExt::from_error(format_error(*err));
// If we get a `Reset`, GenMC decided that this lock operation should not yet run, since it
// would not acquire the mutex. Like the handling of the case further down where we read a `1`
// ("Mutex already locked"), Miri should call the handle function again once the current thread
// is scheduled by GenMC the next time.
if (std::holds_alternative<Reset>(load_ret))
return MutexLockResultExt::reset();
const auto* ret_val = std::get_if<SVal>(&load_ret);
ERROR_ON(!ret_val, "Unimplemented: mutex lock returned unexpected result.");
ERROR_ON(!MutexState::isValid(*ret_val), "Mutex read value was neither 0 nor 1");
if (*ret_val == MutexState::LOCKED) {
// We did not acquire the mutex, so we tell GenMC to block the thread until we can acquire
// it. GenMC determines this based on the annotation we pass with the load further up in
// this function, namely when that load will read a value other than `MutexState::LOCKED`.
this->handle_assume_block(thread_id, AssumeType::Spinloop);
return MutexLockResultExt::ok(false);
}
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::LockCasWrite>(
nullptr,
inc_pos(thread_id),
old_val,
address,
size,
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
return MutexLockResultExt::from_error(format_error(*err));
// We don't update Miri's memory for this operation so we don't need to know if the store
// was the co-maximal store, but we still check that we at least get a boolean as the result
// of the store.
const auto* is_co_max = std::get_if<bool>(&store_ret);
ERROR_ON(!is_co_max, "Unimplemented: mutex_try_lock store returned unexpected result.");
return MutexLockResultExt::ok(true);
}
auto MiriGenmcShim::handle_mutex_try_lock(ThreadId thread_id, uint64_t address, uint64_t size)
-> MutexLockResult {
auto& currPos = threads_action_[thread_id].event;
// As usual, we need to tell GenMC which value was stored at this location before this atomic
// access, if there previously was a non-atomic initializing access. We set the initial state of
// a mutex to be "unlocked".
const auto old_val = MutexState::UNLOCKED;
const auto load_ret = GenMCDriver::handleLoad<EventLabel::EventLabelKind::TrylockCasRead>(
nullptr,
++currPos,
old_val,
SAddr(address),
ASize(size)
);
if (const auto* err = std::get_if<VerificationError>(&load_ret))
return MutexLockResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&load_ret);
ERROR_ON(!ret_val, "Unimplemented: mutex trylock load returned unexpected result.");
ERROR_ON(!MutexState::isValid(*ret_val), "Mutex read value was neither 0 nor 1");
if (*ret_val == MutexState::LOCKED)
return MutexLockResultExt::ok(false); /* Lock already held. */
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::TrylockCasWrite>(
nullptr,
++currPos,
old_val,
SAddr(address),
ASize(size)
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
return MutexLockResultExt::from_error(format_error(*err));
// We don't update Miri's memory for this operation so we don't need to know if the store was
// co-maximal, but we still check that we get a boolean result.
const auto* is_co_max = std::get_if<bool>(&store_ret);
ERROR_ON(!is_co_max, "Unimplemented: store part of mutex try_lock returned unexpected result.");
return MutexLockResultExt::ok(true);
}
auto MiriGenmcShim::handle_mutex_unlock(ThreadId thread_id, uint64_t address, uint64_t size)
-> StoreResult {
const auto pos = inc_pos(thread_id);
const auto ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::UnlockWrite>(
nullptr,
pos,
// As usual, we need to tell GenMC which value was stored at this location before this
// atomic access, if there previously was a non-atomic initializing access. We set the
// initial state of a mutex to be "unlocked".
/* old_val */ MutexState::UNLOCKED,
MemOrdering::Release,
SAddr(address),
ASize(size),
AType::Signed,
/* store_value */ MutexState::UNLOCKED,
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&ret))
return StoreResultExt::from_error(format_error(*err));
const auto* is_co_max = std::get_if<bool>(&ret);
ERROR_ON(!is_co_max, "Unimplemented: store part of mutex unlock returned unexpected result.");
return StoreResultExt::ok(*is_co_max);
}
/** Thread creation/joining */
void MiriGenmcShim::handle_thread_create(ThreadId thread_id, ThreadId parent_id) {
// NOTE: The threadCreate event happens in the parent:
const auto pos = inc_pos(parent_id);
// FIXME(genmc): for supporting symmetry reduction, these will need to be properly set:
const unsigned fun_id = 0;
const SVal arg = SVal(0);
const ThreadInfo child_info =
ThreadInfo { thread_id, parent_id, fun_id, arg, "unknown thread" };
const auto child_tid = GenMCDriver::handleThreadCreate(nullptr, pos, child_info, EventDeps());
// Sanity check the thread id, which is the index in the `threads_action_` array.
BUG_ON(child_tid != thread_id || child_tid <= 0 || child_tid != threads_action_.size());
threads_action_.push_back(Action(ActionKind::Load, Event(child_tid, 0)));
}
void MiriGenmcShim::handle_thread_join(ThreadId thread_id, ThreadId child_id) {
// The thread join event happens in the parent.
const auto pos = inc_pos(thread_id);
const auto ret = GenMCDriver::handleThreadJoin(nullptr, pos, child_id, EventDeps());
// If the join failed, decrease the event index again:
if (!std::holds_alternative<SVal>(ret)) {
dec_pos(thread_id);
}
// FIXME(genmc): handle `HandleResult::{Invalid, Reset, VerificationError}` return values.
// NOTE: Thread return value is ignored, since Miri doesn't need it.
}
void MiriGenmcShim::handle_thread_finish(ThreadId thread_id, uint64_t ret_val) {
const auto pos = inc_pos(thread_id);
GenMCDriver::handleThreadFinish(nullptr, pos, SVal(ret_val));
}
void MiriGenmcShim::handle_thread_kill(ThreadId thread_id) {
const auto pos = inc_pos(thread_id);
GenMCDriver::handleThreadKill(nullptr, pos);
}

View file

@ -1,163 +0,0 @@
/** This file contains functionality related to handling mutexes. */
#include "MiriInterface.hpp"
// GenMC headers:
#include "Static/ModuleID.hpp"
// CXX.rs generated headers:
#include "genmc-sys/src/lib.rs.h"
#define MUTEX_UNLOCKED SVal(0)
#define MUTEX_LOCKED SVal(1)
auto MiriGenmcShim::handle_mutex_lock(ThreadId thread_id, uint64_t address, uint64_t size)
    -> MutexLockResult {
    // Acquire the mutex modeled as an atomic cell at `address` via a lock-CAS read,
    // followed (on success) by a lock-CAS write of `MUTEX_LOCKED`'s counterpart state.
    //
    // This annotation informs GenMC about the condition required to make this lock call succeed.
    // It stands for `value_read_by_load != MUTEX_LOCKED`.
    const auto size_bits = size * 8;
    // No `std::move` here: the `Annotation` is a prvalue, so moving it is redundant and
    // would only inhibit copy elision.
    const auto annot = Annotation(
        AssumeType::Spinloop,
        Annotation::ExprVP(
            NeExpr<ModuleID::ID>::create(
                // `RegisterExpr` marks the value of the current expression, i.e., the loaded value.
                // The `id` is ignored by GenMC; it is only used by the LLI frontend to substitute
                // other variables from previous expressions that may be used here.
                RegisterExpr<ModuleID::ID>::create(size_bits, /* id */ 0),
                ConcreteExpr<ModuleID::ID>::create(size_bits, MUTEX_LOCKED)
            )
                .release()
        )
    );
    // As usual, we need to tell GenMC which value was stored at this location before this atomic
    // access, if there previously was a non-atomic initializing access. We set the initial state of
    // a mutex to be "unlocked".
    const auto old_val = MUTEX_UNLOCKED;
    const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::LockCasRead>(
        thread_id,
        old_val,
        address,
        size,
        annot,
        EventDeps()
    );
    if (const auto* err = std::get_if<VerificationError>(&load_ret))
        return MutexLockResultExt::from_error(format_error(*err));
    // If we get a `Reset`, GenMC decided that this lock operation should not yet run, since it
    // would not acquire the mutex. Like the handling of the case further down where we read a `1`
    // ("Mutex already locked"), Miri should call the handle function again once the current thread
    // is scheduled by GenMC the next time.
    if (std::holds_alternative<Reset>(load_ret))
        return MutexLockResultExt::reset();
    const auto* ret_val = std::get_if<SVal>(&load_ret);
    ERROR_ON(!ret_val, "Unimplemented: mutex lock returned unexpected result.");
    ERROR_ON(
        *ret_val != MUTEX_UNLOCKED && *ret_val != MUTEX_LOCKED,
        "Mutex read value was neither 0 nor 1"
    );
    const bool is_lock_acquired = *ret_val == MUTEX_UNLOCKED;
    if (is_lock_acquired) {
        const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::LockCasWrite>(
            nullptr,
            inc_pos(thread_id),
            old_val,
            address,
            size,
            EventDeps()
        );
        if (const auto* err = std::get_if<VerificationError>(&store_ret))
            return MutexLockResultExt::from_error(format_error(*err));
        // We don't update Miri's memory for this operation so we don't need to know if the store
        // was the co-maximal store, but we still check that we at least get a boolean as the result
        // of the store.
        const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
        // This is the `lock` handler, so the message says "lock" (the previous "try_lock"
        // wording was a copy-paste from `handle_mutex_try_lock`).
        ERROR_ON(
            nullptr == is_coherence_order_maximal_write,
            "Unimplemented: store part of mutex lock returned unexpected result."
        );
    } else {
        // We did not acquire the mutex, so we tell GenMC to block the thread until we can acquire
        // it. GenMC determines this based on the annotation we pass with the load further up in
        // this function, namely when that load will read a value other than `MUTEX_LOCKED`.
        this->handle_assume_block(thread_id, AssumeType::Spinloop);
    }
    return MutexLockResultExt::ok(is_lock_acquired);
}
auto MiriGenmcShim::handle_mutex_try_lock(ThreadId thread_id, uint64_t address, uint64_t size)
    -> MutexLockResult {
    // Attempt to acquire the mutex at `address` without blocking: a trylock-CAS read,
    // followed by a trylock-CAS write only if the mutex was observed unlocked.
    //
    // Both the read and (on success) the write consume an event position in this thread,
    // so keep a reference to the position and bump it for each event.
    // (Renamed from `currPos` for consistency with the file's snake_case locals.)
    auto& curr_pos = threads_action_[thread_id].event;
    // As usual, we need to tell GenMC which value was stored at this location before this atomic
    // access, if there previously was a non-atomic initializing access. We set the initial state of
    // a mutex to be "unlocked".
    const auto old_val = MUTEX_UNLOCKED;
    const auto load_ret = GenMCDriver::handleLoad<EventLabel::EventLabelKind::TrylockCasRead>(
        nullptr,
        ++curr_pos,
        old_val,
        SAddr(address),
        ASize(size)
    );
    if (const auto* err = std::get_if<VerificationError>(&load_ret))
        return MutexLockResultExt::from_error(format_error(*err));
    const auto* ret_val = std::get_if<SVal>(&load_ret);
    // Use `ERROR_ON` like the sibling mutex handlers instead of an explicit `if` + `ERROR`.
    ERROR_ON(nullptr == ret_val, "Unimplemented: mutex trylock load returned unexpected result.");
    ERROR_ON(
        *ret_val != MUTEX_UNLOCKED && *ret_val != MUTEX_LOCKED,
        "Mutex read value was neither 0 nor 1"
    );
    const bool is_lock_acquired = *ret_val == MUTEX_UNLOCKED;
    if (!is_lock_acquired) {
        return MutexLockResultExt::ok(false); /* Lock already held. */
    }
    const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::TrylockCasWrite>(
        nullptr,
        ++curr_pos,
        old_val,
        SAddr(address),
        ASize(size)
    );
    if (const auto* err = std::get_if<VerificationError>(&store_ret))
        return MutexLockResultExt::from_error(format_error(*err));
    // We don't update Miri's memory for this operation so we don't need to know if the store was
    // co-maximal, but we still check that we get a boolean result.
    const bool* is_coherence_order_maximal_write = std::get_if<bool>(&store_ret);
    ERROR_ON(
        nullptr == is_coherence_order_maximal_write,
        "Unimplemented: store part of mutex try_lock returned unexpected result."
    );
    return MutexLockResultExt::ok(true);
}
auto MiriGenmcShim::handle_mutex_unlock(ThreadId thread_id, uint64_t address, uint64_t size)
    -> StoreResult {
    // Releasing the mutex is modeled as a release-ordered store of `MUTEX_UNLOCKED`.
    const auto store_pos = inc_pos(thread_id);
    const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::UnlockWrite>(
        nullptr,
        store_pos,
        // As usual, we need to tell GenMC which value was stored at this location before this
        // atomic access, if there previously was a non-atomic initializing access. We set the
        // initial state of a mutex to be "unlocked".
        /* old_val */ MUTEX_UNLOCKED,
        MemOrdering::Release,
        SAddr(address),
        ASize(size),
        AType::Signed,
        /* store_value */ MUTEX_UNLOCKED,
        EventDeps()
    );
    if (const auto* err = std::get_if<VerificationError>(&store_ret))
        return StoreResultExt::from_error(format_error(*err));
    // We don't update Miri's memory for this operation, so the co-maximality of the store is
    // irrelevant here; we merely require that the driver actually produced a boolean.
    const bool* is_co_maximal = std::get_if<bool>(&store_ret);
    ERROR_ON(
        nullptr == is_co_maximal,
        "Unimplemented: store part of mutex unlock returned unexpected result."
    );
    return StoreResultExt::ok(*is_co_maximal);
}

View file

@ -1,56 +0,0 @@
/** This file contains functionality related thread management (creation, finishing, join, etc.) */
#include "MiriInterface.hpp"
// CXX.rs generated headers:
#include "genmc-sys/src/lib.rs.h"
// GenMC headers:
#include "Support/Error.hpp"
#include "Support/Verbosity.hpp"
// C++ headers:
#include <cstdint>
void MiriGenmcShim::handle_thread_create(ThreadId thread_id, ThreadId parent_id) {
    // Register a newly spawned thread `thread_id` with GenMC and start tracking its
    // event positions in `threads_action_`.
    // NOTE: The threadCreate event happens in the parent:
    const auto pos = inc_pos(parent_id);
    // FIXME(genmc): for supporting symmetry reduction, these will need to be properly set:
    const unsigned fun_id = 0;
    const SVal arg = SVal(0);
    const ThreadInfo child_info =
        ThreadInfo { thread_id, parent_id, fun_id, arg, "unknown thread" };
    // NOTE: Default memory ordering (`Release`) used here.
    const auto child_tid = GenMCDriver::handleThreadCreate(nullptr, pos, child_info, EventDeps());
    // Sanity check the thread id, which is the index in the `threads_action_` array.
    // Thread ids must therefore be assigned densely and in creation order.
    BUG_ON(child_tid != thread_id || child_tid <= 0 || child_tid != threads_action_.size());
    threads_action_.push_back(Action(ActionKind::Load, Event(child_tid, 0)));
}
void MiriGenmcShim::handle_thread_join(ThreadId thread_id, ThreadId child_id) {
    // Inform GenMC that thread `thread_id` joins on thread `child_id`.
    // The thread join event happens in the parent.
    const auto pos = inc_pos(thread_id);
    // NOTE: Default memory ordering (`Acquire`) used here.
    const auto ret = GenMCDriver::handleThreadJoin(nullptr, pos, child_id, EventDeps());
    // If the join failed, decrease the event index again:
    if (!std::holds_alternative<SVal>(ret)) {
        dec_pos(thread_id);
    }
    // FIXME(genmc): handle `HandleResult::{Invalid, Reset, VerificationError}` return values.
    // NOTE: Thread return value is ignored, since Miri doesn't need it.
}
void MiriGenmcShim::handle_thread_finish(ThreadId thread_id, uint64_t ret_val) {
    // Inform GenMC that thread `thread_id` finished with return value `ret_val`
    // (wrapped into an `SVal` for the driver).
    const auto pos = inc_pos(thread_id);
    // NOTE: Default memory ordering (`Release`) used here.
    GenMCDriver::handleThreadFinish(nullptr, pos, SVal(ret_val));
}
void MiriGenmcShim::handle_thread_kill(ThreadId thread_id) {
    // Record a kill event for `thread_id` at that thread's next event position.
    GenMCDriver::handleThreadKill(nullptr, inc_pos(thread_id));
}

View file

@ -438,7 +438,11 @@ mod ffi {
alignment: u64,
) -> u64;
/// Returns true if an error was found.
fn handle_free(self: Pin<&mut MiriGenmcShim>, thread_id: i32, address: u64) -> bool;
fn handle_free(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
address: u64,
) -> UniquePtr<CxxString>;
/**** Thread management ****/
fn handle_thread_create(self: Pin<&mut MiriGenmcShim>, thread_id: i32, parent_id: i32);

View file

@ -1 +1 @@
69d4d5fc0e4db60272aac85ef27ecccef5764f3a
1eb0657f78777f0b4d6bcc49c126d5d35212cae5

View file

@ -29,6 +29,11 @@ extern crate rustc_target;
/// above, instead of via Cargo as you'd normally do. This is currently needed for LTO due to
/// https://github.com/rust-lang/cc-rs/issues/1613.
#[cfg(feature = "jemalloc")]
// Make sure `--all-features` works: only Linux and macOS actually use jemalloc, and not on arm32.
#[cfg(all(
any(target_os = "linux", target_os = "macos"),
any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"),
))]
extern crate tikv_jemalloc_sys as _;
mod log;
@ -176,7 +181,7 @@ fn make_miri_codegen_backend(opts: &Options, target: &Target) -> Box<dyn Codegen
// Use the target_config method of the default codegen backend (eg LLVM) to ensure the
// calculated target features match said backend by respecting eg -Ctarget-cpu.
let target_config_backend =
rustc_interface::util::get_codegen_backend(&early_dcx, &opts.sysroot, None, &target);
rustc_interface::util::get_codegen_backend(&early_dcx, &opts.sysroot, None, target);
let target_config_backend_init = Once::new();
Box::new(DummyCodegenBackend {

View file

@ -336,23 +336,10 @@ struct NodeAppArgs<'visit> {
/// The permissions map of this tree.
loc: &'visit mut LocationTree,
}
/// Data given to the error handler
struct ErrHandlerArgs<'node, InErr> {
/// Kind of error that occurred
error_kind: InErr,
/// Tag that triggered the error (not the tag that was accessed,
/// rather the parent tag that had insufficient permissions or the
/// non-parent tag that had a protector).
conflicting_info: &'node NodeDebugInfo,
/// Information about the tag that was accessed just before the
/// error was triggered.
accessed_info: &'node NodeDebugInfo,
}
/// Internal contents of `Tree` with the minimum of mutable access for
/// the purposes of the tree traversal functions: the permissions (`perms`) can be
/// updated but not the tree structure (`tag_mapping` and `nodes`)
/// For soundness do not modify the children or parent indexes of nodes
/// during traversal.
struct TreeVisitor<'tree> {
tag_mapping: &'tree UniKeyMap<BorTag>,
nodes: &'tree mut UniValMap<Node>,
loc: &'tree mut LocationTree,
}
@ -377,16 +364,12 @@ enum RecursionState {
/// Stack of nodes left to explore in a tree traversal.
/// See the docs of `traverse_this_parents_children_other` for details on the
/// traversal order.
struct TreeVisitorStack<NodeContinue, NodeApp, ErrHandler> {
/// Identifier of the original access.
initial: UniIndex,
struct TreeVisitorStack<NodeContinue, NodeApp> {
/// Function describing whether to continue at a tag.
/// This is only invoked for foreign accesses.
f_continue: NodeContinue,
/// Function to apply to each tag.
f_propagate: NodeApp,
/// Handler to add the required context to diagnostics.
err_builder: ErrHandler,
/// Mutable state of the visit: the tags left to handle.
/// Every tag pushed should eventually be handled,
/// and the precise order is relevant for diagnostics.
@ -398,12 +381,10 @@ struct TreeVisitorStack<NodeContinue, NodeApp, ErrHandler> {
stack: Vec<(UniIndex, AccessRelatedness, RecursionState)>,
}
impl<NodeContinue, NodeApp, InnErr, OutErr, ErrHandler>
TreeVisitorStack<NodeContinue, NodeApp, ErrHandler>
impl<NodeContinue, NodeApp, Err> TreeVisitorStack<NodeContinue, NodeApp>
where
NodeContinue: Fn(&NodeAppArgs<'_>) -> ContinueTraversal,
NodeApp: Fn(NodeAppArgs<'_>) -> Result<(), InnErr>,
ErrHandler: Fn(ErrHandlerArgs<'_, InnErr>) -> OutErr,
NodeApp: Fn(NodeAppArgs<'_>) -> Result<(), Err>,
{
fn should_continue_at(
&self,
@ -420,16 +401,8 @@ where
this: &mut TreeVisitor<'_>,
idx: UniIndex,
rel_pos: AccessRelatedness,
) -> Result<(), OutErr> {
(self.f_propagate)(NodeAppArgs { idx, rel_pos, nodes: this.nodes, loc: this.loc }).map_err(
|error_kind| {
(self.err_builder)(ErrHandlerArgs {
error_kind,
conflicting_info: &this.nodes.get(idx).unwrap().debug_info,
accessed_info: &this.nodes.get(self.initial).unwrap().debug_info,
})
},
)
) -> Result<(), Err> {
(self.f_propagate)(NodeAppArgs { idx, rel_pos, nodes: this.nodes, loc: this.loc })
}
fn go_upwards_from_accessed(
@ -437,7 +410,7 @@ where
this: &mut TreeVisitor<'_>,
accessed_node: UniIndex,
visit_children: ChildrenVisitMode,
) -> Result<(), OutErr> {
) -> Result<(), Err> {
// We want to visit the accessed node's children first.
// However, we will below walk up our parents and push their children (our cousins)
// onto the stack. To ensure correct iteration order, this method thus finishes
@ -485,7 +458,7 @@ where
Ok(())
}
fn finish_foreign_accesses(&mut self, this: &mut TreeVisitor<'_>) -> Result<(), OutErr> {
fn finish_foreign_accesses(&mut self, this: &mut TreeVisitor<'_>) -> Result<(), Err> {
while let Some((idx, rel_pos, step)) = self.stack.last_mut() {
let idx = *idx;
let rel_pos = *rel_pos;
@ -521,26 +494,21 @@ where
Ok(())
}
fn new(
initial: UniIndex,
f_continue: NodeContinue,
f_propagate: NodeApp,
err_builder: ErrHandler,
) -> Self {
Self { initial, f_continue, f_propagate, err_builder, stack: Vec::new() }
fn new(f_continue: NodeContinue, f_propagate: NodeApp) -> Self {
Self { f_continue, f_propagate, stack: Vec::new() }
}
}
impl<'tree> TreeVisitor<'tree> {
/// Applies `f_propagate` to every vertex of the tree in a piecewise bottom-up way: First, visit
/// all ancestors of `start` (starting with `start` itself), then children of `start`, then the rest,
/// all ancestors of `start_idx` (starting with `start_idx` itself), then children of `start_idx`, then the rest,
/// going bottom-up in each of these two "pieces" / sections.
/// This ensures that errors are triggered in the following order
/// - first invalid accesses with insufficient permissions, closest to the accessed node first,
/// - then protector violations, bottom-up, starting with the children of the accessed node, and then
/// going upwards and outwards.
///
/// The following graphic visualizes it, with numbers indicating visitation order and `start` being
/// The following graphic visualizes it, with numbers indicating visitation order and `start_idx` being
/// the node that is visited first ("1"):
///
/// ```text
@ -558,7 +526,7 @@ impl<'tree> TreeVisitor<'tree> {
/// ```
///
/// `f_propagate` should follow the following format: for a given `Node` it updates its
/// `Permission` depending on the position relative to `start` (given by an
/// `Permission` depending on the position relative to `start_idx` (given by an
/// `AccessRelatedness`).
/// `f_continue` is called earlier on foreign nodes, and describes whether to even start
/// visiting the subtree at that node. If it e.g. returns `SkipSelfAndChildren` on node 6
@ -568,15 +536,13 @@ impl<'tree> TreeVisitor<'tree> {
/// Finally, remember that the iteration order is not relevant for UB, it only affects
/// diagnostics. It also affects tree traversal optimizations built on top of this, so
/// those need to be reviewed carefully as well whenever this changes.
fn traverse_this_parents_children_other<InnErr, OutErr>(
fn traverse_this_parents_children_other<Err>(
mut self,
start: BorTag,
start_idx: UniIndex,
f_continue: impl Fn(&NodeAppArgs<'_>) -> ContinueTraversal,
f_propagate: impl Fn(NodeAppArgs<'_>) -> Result<(), InnErr>,
err_builder: impl Fn(ErrHandlerArgs<'_, InnErr>) -> OutErr,
) -> Result<(), OutErr> {
let start_idx = self.tag_mapping.get(&start).unwrap();
let mut stack = TreeVisitorStack::new(start_idx, f_continue, f_propagate, err_builder);
f_propagate: impl Fn(NodeAppArgs<'_>) -> Result<(), Err>,
) -> Result<(), Err> {
let mut stack = TreeVisitorStack::new(f_continue, f_propagate);
// Visits the accessed node itself, and all its parents, i.e. all nodes
// undergoing a child access. Also pushes the children and the other
// cousin nodes (i.e. all nodes undergoing a foreign access) to the stack
@ -592,16 +558,14 @@ impl<'tree> TreeVisitor<'tree> {
stack.finish_foreign_accesses(&mut self)
}
/// Like `traverse_this_parents_children_other`, but skips the children of `start`.
fn traverse_nonchildren<InnErr, OutErr>(
/// Like `traverse_this_parents_children_other`, but skips the children of `start_idx`.
fn traverse_nonchildren<Err>(
mut self,
start: BorTag,
start_idx: UniIndex,
f_continue: impl Fn(&NodeAppArgs<'_>) -> ContinueTraversal,
f_propagate: impl Fn(NodeAppArgs<'_>) -> Result<(), InnErr>,
err_builder: impl Fn(ErrHandlerArgs<'_, InnErr>) -> OutErr,
) -> Result<(), OutErr> {
let start_idx = self.tag_mapping.get(&start).unwrap();
let mut stack = TreeVisitorStack::new(start_idx, f_continue, f_propagate, err_builder);
f_propagate: impl Fn(NodeAppArgs<'_>) -> Result<(), Err>,
) -> Result<(), Err> {
let mut stack = TreeVisitorStack::new(f_continue, f_propagate);
// Visits the accessed node itself, and all its parents, i.e. all nodes
// undergoing a child access. Also pushes the other cousin nodes to the
// stack, but not the children of the accessed node.
@ -645,7 +609,7 @@ impl Tree {
);
nodes
};
let rperms = {
let locations = {
let mut perms = UniValMap::default();
// We manually set it to `Unique` on all in-bounds positions.
// We also ensure that it is accessed, so that no `Unique` but
@ -661,7 +625,7 @@ impl Tree {
let wildcard_accesses = UniValMap::default();
DedupRangeMap::new(size, LocationTree { perms, wildcard_accesses })
};
Self { root: root_idx, nodes, locations: rperms, tag_mapping }
Self { root: root_idx, nodes, locations, tag_mapping }
}
}
@ -808,58 +772,52 @@ impl<'tcx> Tree {
span,
)?;
// The order in which we check if any nodes are invalidated only
// matters to diagnostics, so we use the root as a default tag.
let start_idx = match prov {
ProvenanceExtra::Concrete(tag) => self.tag_mapping.get(&tag).unwrap(),
ProvenanceExtra::Wildcard => self.root,
};
// Check if this breaks any strong protector.
// (Weak protectors are already handled by `perform_access`.)
for (loc_range, loc) in self.locations.iter_mut(access_range.start, access_range.size) {
// The order in which we check if any nodes are invalidated only
// matters to diagnostics, so we use the root as a default tag.
let start_tag = match prov {
ProvenanceExtra::Concrete(tag) => tag,
ProvenanceExtra::Wildcard => self.nodes.get(self.root).unwrap().tag,
};
TreeVisitor { nodes: &mut self.nodes, tag_mapping: &self.tag_mapping, loc }
.traverse_this_parents_children_other(
start_tag,
// Visit all children, skipping none.
|_| ContinueTraversal::Recurse,
|args: NodeAppArgs<'_>| -> Result<(), TransitionError> {
let node = args.nodes.get(args.idx).unwrap();
let perm = args.loc.perms.entry(args.idx);
TreeVisitor { nodes: &mut self.nodes, loc }.traverse_this_parents_children_other(
start_idx,
// Visit all children, skipping none.
|_| ContinueTraversal::Recurse,
|args: NodeAppArgs<'_>| {
let node = args.nodes.get(args.idx).unwrap();
let perm = args.loc.perms.entry(args.idx);
let perm =
perm.get().copied().unwrap_or_else(|| node.default_location_state());
if global.borrow().protected_tags.get(&node.tag)
let perm = perm.get().copied().unwrap_or_else(|| node.default_location_state());
if global.borrow().protected_tags.get(&node.tag)
== Some(&ProtectorKind::StrongProtector)
// Don't check for protector if it is a Cell (see `unsafe_cell_deallocate` in `interior_mutability.rs`).
// Related to https://github.com/rust-lang/rust/issues/55005.
&& !perm.permission.is_cell()
// Only trigger UB if the accessed bit is set, i.e. if the protector is actually protecting this offset. See #4579.
&& perm.accessed
{
Err(TransitionError::ProtectedDealloc)
} else {
Ok(())
}
},
|args: ErrHandlerArgs<'_, TransitionError>| -> InterpErrorKind<'tcx> {
let ErrHandlerArgs { error_kind, conflicting_info, accessed_info } = args;
TbError {
conflicting_info,
{
Err(TbError {
conflicting_info: &node.debug_info,
access_cause: diagnostics::AccessCause::Dealloc,
alloc_id,
error_offset: loc_range.start,
error_kind,
error_kind: TransitionError::ProtectedDealloc,
accessed_info: match prov {
ProvenanceExtra::Concrete(_) => Some(accessed_info),
// `accessed_info` contains the info of `start_tag`.
// On a wildcard access this is not the info of the accessed tag
// (as we don't know the accessed tag).
ProvenanceExtra::Concrete(_) =>
Some(&args.nodes.get(start_idx).unwrap().debug_info),
// We don't know from where the access came during a wildcard access.
ProvenanceExtra::Wildcard => None,
},
}
.build()
},
)?;
.build())
} else {
Ok(())
}
},
)?;
}
interp_ok(())
}
@ -890,81 +848,32 @@ impl<'tcx> Tree {
alloc_id: AllocId, // diagnostics
span: Span, // diagnostics
) -> InterpResult<'tcx> {
let ProvenanceExtra::Concrete(tag) = prov else {
return self.perform_wildcard_access(access_range_and_kind, global, alloc_id, span);
};
use std::ops::Range;
// Performs the per-node work:
// - insert the permission if it does not exist
// - perform the access
// - record the transition
// to which some optimizations are added:
// - skip the traversal of the children in some cases
// - do not record noop transitions
//
// `perms_range` is only for diagnostics (it is the range of
// the `RangeMap` on which we are currently working).
let node_skipper = |access_kind: AccessKind, args: &NodeAppArgs<'_>| -> ContinueTraversal {
let node = args.nodes.get(args.idx).unwrap();
let perm = args.loc.perms.get(args.idx);
let old_state = perm.copied().unwrap_or_else(|| node.default_location_state());
old_state.skip_if_known_noop(access_kind, args.rel_pos)
};
let node_app = |perms_range: Range<u64>,
access_kind: AccessKind,
access_cause: diagnostics::AccessCause,
args: NodeAppArgs<'_>|
-> Result<(), TransitionError> {
let node = args.nodes.get_mut(args.idx).unwrap();
let mut perm = args.loc.perms.entry(args.idx);
let state = perm.or_insert(node.default_location_state());
let protected = global.borrow().protected_tags.contains_key(&node.tag);
state.perform_transition(
args.idx,
args.nodes,
&mut args.loc.wildcard_accesses,
access_kind,
access_cause,
/* access_range */ access_range_and_kind.map(|x| x.0),
args.rel_pos,
span,
perms_range,
protected,
)
};
// Error handler in case `node_app` goes wrong.
// Wraps the faulty transition in more context for diagnostics.
let err_handler = |perms_range: Range<u64>,
access_cause: diagnostics::AccessCause,
args: ErrHandlerArgs<'_, TransitionError>|
-> InterpErrorKind<'tcx> {
let ErrHandlerArgs { error_kind, conflicting_info, accessed_info } = args;
TbError {
conflicting_info,
access_cause,
alloc_id,
error_offset: perms_range.start,
error_kind,
accessed_info: Some(accessed_info),
}
.build()
#[cfg(feature = "expensive-consistency-checks")]
if matches!(prov, ProvenanceExtra::Wildcard) {
self.verify_wildcard_consistency(global);
}
let source_idx = match prov {
ProvenanceExtra::Concrete(tag) => Some(self.tag_mapping.get(&tag).unwrap()),
ProvenanceExtra::Wildcard => None,
};
if let Some((access_range, access_kind, access_cause)) = access_range_and_kind {
// Default branch: this is a "normal" access through a known range.
// We iterate over affected locations and traverse the tree for each of them.
for (loc_range, loc) in self.locations.iter_mut(access_range.start, access_range.size) {
TreeVisitor { nodes: &mut self.nodes, tag_mapping: &self.tag_mapping, loc }
.traverse_this_parents_children_other(
tag,
|args| node_skipper(access_kind, args),
|args| node_app(loc_range.clone(), access_kind, access_cause, args),
|args| err_handler(loc_range.clone(), access_cause, args),
)?;
loc.perform_access(
self.root,
&mut self.nodes,
source_idx,
loc_range,
Some(access_range),
access_kind,
access_cause,
global,
alloc_id,
span,
ChildrenVisitMode::VisitChildrenOfAccessed,
)?;
}
} else {
// This is a special access through the entire allocation.
@ -976,21 +885,31 @@ impl<'tcx> Tree {
// See the test case `returned_mut_is_usable` from
// `tests/pass/tree_borrows/tree-borrows.rs` for an example of
// why this is important.
// Wildcard references are never protected. So this can never be
// called with a wildcard reference.
let source_idx = source_idx.unwrap();
for (loc_range, loc) in self.locations.iter_mut_all() {
let idx = self.tag_mapping.get(&tag).unwrap();
// Only visit accessed permissions
if let Some(p) = loc.perms.get(idx)
if let Some(p) = loc.perms.get(source_idx)
&& let Some(access_kind) = p.permission.protector_end_access()
&& p.accessed
{
let access_cause = diagnostics::AccessCause::FnExit(access_kind);
TreeVisitor { nodes: &mut self.nodes, tag_mapping: &self.tag_mapping, loc }
.traverse_nonchildren(
tag,
|args| node_skipper(access_kind, args),
|args| node_app(loc_range.clone(), access_kind, access_cause, args),
|args| err_handler(loc_range.clone(), access_cause, args),
)?;
loc.perform_access(
self.root,
&mut self.nodes,
Some(source_idx),
loc_range,
None,
access_kind,
access_cause,
global,
alloc_id,
span,
ChildrenVisitMode::SkipChildrenOfAccessed,
)?;
}
}
}
@ -1148,123 +1067,234 @@ impl Tree {
}
}
/// Methods for wildcard accesses.
impl<'tcx> Tree {
/// Analogous to `perform_access`, but we do not know from which exposed
/// reference the access happens.
pub fn perform_wildcard_access(
impl<'tcx> LocationTree {
/// Performs an access on this location.
/// * `access_source`: The index, if any, where the access came from.
/// * `visit_children`: Whether to skip updating the children of `access_source`.
fn perform_access(
&mut self,
access_range_and_kind: Option<(AllocRange, AccessKind, diagnostics::AccessCause)>,
root: UniIndex,
nodes: &mut UniValMap<Node>,
access_source: Option<UniIndex>,
loc_range: Range<u64>,
access_range: Option<AllocRange>,
access_kind: AccessKind,
access_cause: diagnostics::AccessCause,
global: &GlobalState,
alloc_id: AllocId, // diagnostics
span: Span, // diagnostics
visit_children: ChildrenVisitMode,
) -> InterpResult<'tcx> {
if let Some(idx) = access_source {
self.perform_normal_access(
idx,
nodes,
loc_range.clone(),
access_range,
access_kind,
access_cause,
global,
alloc_id,
span,
visit_children,
)
} else {
// `SkipChildrenOfAccessed` only gets set on protector release.
// Since a wildcard reference are never protected this assert shouldn't fail.
assert!(matches!(visit_children, ChildrenVisitMode::VisitChildrenOfAccessed));
self.perform_wildcard_access(
root,
nodes,
loc_range.clone(),
access_range,
access_kind,
access_cause,
global,
alloc_id,
span,
)
}
}
/// Performs a normal access on the tree containing `access_source`.
/// * `access_source`: The index of the tag being accessed.
/// * `visit_children`: Whether to skip the children of `access_source`
/// during the access. Used for protector end access.
fn perform_normal_access(
&mut self,
access_source: UniIndex,
nodes: &mut UniValMap<Node>,
loc_range: Range<u64>,
access_range: Option<AllocRange>,
access_kind: AccessKind,
access_cause: diagnostics::AccessCause,
global: &GlobalState,
alloc_id: AllocId, // diagnostics
span: Span, // diagnostics
visit_children: ChildrenVisitMode,
) -> InterpResult<'tcx> {
// Performs the per-node work:
// - insert the permission if it does not exist
// - perform the access
// - record the transition
// to which some optimizations are added:
// - skip the traversal of the children in some cases
// - do not record noop transitions
//
// `perms_range` is only for diagnostics (it is the range of
// the `RangeMap` on which we are currently working).
let node_skipper = |args: &NodeAppArgs<'_>| -> ContinueTraversal {
let node = args.nodes.get(args.idx).unwrap();
let perm = args.loc.perms.get(args.idx);
let old_state = perm.copied().unwrap_or_else(|| node.default_location_state());
old_state.skip_if_known_noop(access_kind, args.rel_pos)
};
let node_app = |args: NodeAppArgs<'_>| -> Result<(), _> {
let node = args.nodes.get_mut(args.idx).unwrap();
let mut perm = args.loc.perms.entry(args.idx);
let state = perm.or_insert(node.default_location_state());
let protected = global.borrow().protected_tags.contains_key(&node.tag);
state
.perform_transition(
args.idx,
args.nodes,
&mut args.loc.wildcard_accesses,
access_kind,
access_cause,
/* access_range */ access_range,
args.rel_pos,
span,
loc_range.clone(),
protected,
)
.map_err(|error_kind| {
TbError {
conflicting_info: &args.nodes.get(args.idx).unwrap().debug_info,
access_cause,
alloc_id,
error_offset: loc_range.start,
error_kind,
accessed_info: Some(&args.nodes.get(access_source).unwrap().debug_info),
}
.build()
})
};
let visitor = TreeVisitor { nodes, loc: self };
match visit_children {
ChildrenVisitMode::VisitChildrenOfAccessed =>
visitor.traverse_this_parents_children_other(access_source, node_skipper, node_app),
ChildrenVisitMode::SkipChildrenOfAccessed =>
visitor.traverse_nonchildren(access_source, node_skipper, node_app),
}
.into()
}
/// Performs a wildcard access on the tree with root `root`. Takes the `access_relatedness`
/// for each node from the `WildcardState` datastructure.
/// * `root`: Root of the tree being accessed.
fn perform_wildcard_access(
&mut self,
root: UniIndex,
nodes: &mut UniValMap<Node>,
loc_range: Range<u64>,
access_range: Option<AllocRange>,
access_kind: AccessKind,
access_cause: diagnostics::AccessCause,
global: &GlobalState,
alloc_id: AllocId, // diagnostics
span: Span, // diagnostics
) -> InterpResult<'tcx> {
#[cfg(feature = "expensive-consistency-checks")]
self.verify_wildcard_consistency(global);
let f_continue =
|idx: UniIndex, nodes: &UniValMap<Node>, loc: &LocationTree| -> ContinueTraversal {
let node = nodes.get(idx).unwrap();
let perm = loc.perms.get(idx);
let wildcard_state = loc.wildcard_accesses.get(idx).cloned().unwrap_or_default();
if let Some((access_range, access_kind, access_cause)) = access_range_and_kind {
// This does a traversal starting from the root through the tree updating
// the permissions of each node.
// The difference to `perform_access` is that we take the access
// relatedness from the wildcard tracking state of the node instead of
// from the visitor itself.
for (loc_range, loc) in self.locations.iter_mut(access_range.start, access_range.size) {
let root_tag = self.nodes.get(self.root).unwrap().tag;
TreeVisitor { loc, nodes: &mut self.nodes, tag_mapping: &self.tag_mapping }
.traverse_this_parents_children_other(
root_tag,
|args: &NodeAppArgs<'_>| -> ContinueTraversal {
let node = args.nodes.get(args.idx).unwrap();
let perm = args.loc.perms.get(args.idx);
let wildcard_state = args
.loc
.wildcard_accesses
.get(args.idx)
.cloned()
.unwrap_or_default();
let old_state = perm.copied().unwrap_or_else(|| node.default_location_state());
// If we know where, relative to this node, the wildcard access occurs,
// then check if we can skip the entire subtree.
if let Some(relatedness) = wildcard_state.access_relatedness(access_kind)
&& let Some(relatedness) = relatedness.to_relatedness()
{
// We can use the usual SIFA machinery to skip nodes.
old_state.skip_if_known_noop(access_kind, relatedness)
} else {
ContinueTraversal::Recurse
}
};
// This does a traversal starting from the root through the tree updating
// the permissions of each node.
// The difference to `perform_access` is that we take the access
// relatedness from the wildcard tracking state of the node instead of
// from the visitor itself.
TreeVisitor { loc: self, nodes }
.traverse_this_parents_children_other(
root,
|args| f_continue(args.idx, args.nodes, args.loc),
|args| {
let node = args.nodes.get_mut(args.idx).unwrap();
let mut entry = args.loc.perms.entry(args.idx);
let perm = entry.or_insert(node.default_location_state());
let old_state =
perm.copied().unwrap_or_else(|| node.default_location_state());
// If we know where, relative to this node, the wildcard access occurs,
// then check if we can skip the entire subtree.
if let Some(relatedness) =
wildcard_state.access_relatedness(access_kind)
&& let Some(relatedness) = relatedness.to_relatedness()
{
// We can use the usual SIFA machinery to skip nodes.
old_state.skip_if_known_noop(access_kind, relatedness)
} else {
ContinueTraversal::Recurse
}
},
|args| {
let node = args.nodes.get_mut(args.idx).unwrap();
let mut entry = args.loc.perms.entry(args.idx);
let perm = entry.or_insert(node.default_location_state());
let protected = global.borrow().protected_tags.contains_key(&node.tag);
let protected = global.borrow().protected_tags.contains_key(&node.tag);
let Some(wildcard_relatedness) = args
.loc
.wildcard_accesses
.get(args.idx)
.and_then(|s| s.access_relatedness(access_kind))
else {
// There doesn't exist a valid exposed reference for this access to
// happen through.
// If this fails for one id, then it fails for all ids so this.
// Since we always check the root first, this means it should always
// fail on the root.
assert_eq!(root, args.idx);
return Err(no_valid_exposed_references_error(
alloc_id,
loc_range.start,
access_cause,
));
};
let Some(wildcard_relatedness) = args
.loc
.wildcard_accesses
.get(args.idx)
.and_then(|s| s.access_relatedness(access_kind))
else {
// There doesn't exist a valid exposed reference for this access to
// happen through.
// If this fails for one id, then it fails for all ids.
// Since we always check the root first, this means it should always
// fail on the root.
assert_eq!(self.root, args.idx);
return Err(no_valid_exposed_references_error(
alloc_id,
loc_range.start,
access_cause,
));
};
let Some(relatedness) = wildcard_relatedness.to_relatedness() else {
// If the access type is Either, then we do not apply any transition
// to this node, but we still update each of its children.
// This is an imprecision! In the future, maybe we can still do some sort
// of best-effort update here.
return Ok(());
};
// We know the exact relatedness, so we can actually do precise checks.
perm.perform_transition(
args.idx,
args.nodes,
&mut args.loc.wildcard_accesses,
access_kind,
access_cause,
Some(access_range),
relatedness,
span,
loc_range.clone(),
protected,
)
.map_err(|trans| {
let node = args.nodes.get(args.idx).unwrap();
TbError {
conflicting_info: &node.debug_info,
access_cause,
alloc_id,
error_offset: loc_range.start,
error_kind: trans,
accessed_info: None,
}
.build()
})
},
|err| err.error_kind,
)?;
}
} else {
// This is for the special access when a protector gets released.
// Wildcard pointers are never protected, so this is unreachable.
unreachable!()
};
interp_ok(())
let Some(relatedness) = wildcard_relatedness.to_relatedness() else {
// If the access type is Either, then we do not apply any transition
// to this node, but we still update each of its children.
// This is an imprecision! In the future, maybe we can still do some sort
// of best-effort update here.
return Ok(());
};
// We know the exact relatedness, so we can actually do precise checks.
perm.perform_transition(
args.idx,
args.nodes,
&mut args.loc.wildcard_accesses,
access_kind,
access_cause,
access_range,
relatedness,
span,
loc_range.clone(),
protected,
)
.map_err(|trans| {
let node = args.nodes.get(args.idx).unwrap();
TbError {
conflicting_info: &node.debug_info,
access_cause,
alloc_id,
error_offset: loc_range.start,
error_kind: trans,
// We don't know from where the access came during a wildcard access.
accessed_info: None,
}
.build()
})
},
)
.into()
}
}

View file

@ -592,9 +592,11 @@ impl GenmcCtx {
genmc_size,
alignment.bytes(),
);
if chosen_address == 0 {
throw_exhaust!(AddressSpaceFull);
}
// Non-global addresses should not be in the global address space or null.
assert_ne!(0, chosen_address, "GenMC malloc returned nullptr.");
// Non-global addresses should not be in the global address space.
assert_eq!(0, chosen_address & GENMC_GLOBAL_ADDRESSES_MASK);
// Sanity check the address alignment:
assert!(
@ -622,15 +624,14 @@ impl GenmcCtx {
!self.get_alloc_data_races(),
"memory deallocation with data race checking disabled."
);
if self
let free_result = self
.handle
.borrow_mut()
.pin_mut()
.handle_free(self.active_thread_genmc_tid(machine), address.bytes())
{
.handle_free(self.active_thread_genmc_tid(machine), address.bytes());
if let Some(error) = free_result.as_ref() {
// FIXME(genmc): improve error handling.
// An error was detected, so we get the error string from GenMC.
throw_ub_format!("{}", self.try_get_error().unwrap());
throw_ub_format!("{}", error.to_string_lossy());
}
interp_ok(())

View file

@ -30,7 +30,7 @@ pub fn run_genmc_mode<'tcx>(
config: &MiriConfig,
eval_entry: impl Fn(Rc<GenmcCtx>) -> Result<(), NonZeroI32>,
) -> Result<(), NonZeroI32> {
// Check for supported target.
// Check for supported target: endianness and pointer size must match the host.
if tcx.data_layout.endian != Endian::Little || tcx.data_layout.pointer_size().bits() != 64 {
tcx.dcx().fatal("GenMC only supports 64bit little-endian targets");
}

View file

@ -9,7 +9,6 @@ pub mod weak_memory;
// Import either the real genmc adapter or a dummy module.
// On unsupported platforms, we always include the dummy module, even if the `genmc` feature is enabled.
// FIXME(genmc,macos): Add `target_os = "macos"` once `https://github.com/dtolnay/cxx/issues/1535` is fixed.
#[cfg_attr(
not(all(
feature = "genmc",

View file

@ -362,6 +362,10 @@ pub fn report_result<'tcx>(
vec![
note!("this is likely not a bug in the program; it indicates that the program performed an operation that Miri does not support"),
],
ResourceExhaustion(ResourceExhaustionInfo::AddressSpaceFull) if ecx.machine.data_race.as_genmc_ref().is_some() =>
vec![
note!("in GenMC mode, the address space is limited to 4GB per thread, and addresses cannot be reused")
],
UndefinedBehavior(AlignmentCheckFailed { .. })
if ecx.machine.check_alignment == AlignmentCheck::Symbolic
=>

View file

@ -487,6 +487,48 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// fadvise is only informational, we can ignore it.
this.write_null(dest)?;
}
"posix_fallocate" => {
// posix_fallocate is not supported by macos.
this.check_target_os(
&[Os::Linux, Os::FreeBsd, Os::Solaris, Os::Illumos, Os::Android],
link_name,
)?;
let [fd, offset, len] = this.check_shim_sig(
shim_sig!(extern "C" fn(i32, libc::off_t, libc::off_t) -> i32),
link_name,
abi,
args,
)?;
let fd = this.read_scalar(fd)?.to_i32()?;
// We don't support platforms which have libc::off_t bigger than 64 bits.
let offset =
i64::try_from(this.read_scalar(offset)?.to_int(offset.layout.size)?).unwrap();
let len = i64::try_from(this.read_scalar(len)?.to_int(len.layout.size)?).unwrap();
let result = this.posix_fallocate(fd, offset, len)?;
this.write_scalar(result, dest)?;
}
"posix_fallocate64" => {
// posix_fallocate64 is only supported on Linux and Android
this.check_target_os(&[Os::Linux, Os::Android], link_name)?;
let [fd, offset, len] = this.check_shim_sig(
shim_sig!(extern "C" fn(i32, libc::off64_t, libc::off64_t) -> i32),
link_name,
abi,
args,
)?;
let fd = this.read_scalar(fd)?.to_i32()?;
let offset = this.read_scalar(offset)?.to_i64()?;
let len = this.read_scalar(len)?.to_i64()?;
let result = this.posix_fallocate(fd, offset, len)?;
this.write_scalar(result, dest)?;
}
"realpath" => {
let [path, resolved_path] = this.check_shim_sig(
shim_sig!(extern "C" fn(*const _, *mut _) -> *mut _),

View file

@ -1202,6 +1202,65 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
/// NOTE: According to the man page of `posix_fallocate`, it returns the error code instead
/// of setting `errno`.
///
/// Emulates `posix_fallocate(fd, offset, len)`: ensures the file behind `fd_num` has at
/// least `offset + len` bytes allocated, growing it via `set_len` when necessary.
/// Returns the libc error code as a `Scalar` (0 on success).
fn posix_fallocate(
    &mut self,
    fd_num: i32,
    offset: i64,
    len: i64,
) -> InterpResult<'tcx, Scalar> {
    let this = self.eval_context_mut();
    // Reject if isolation is enabled.
    if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
        this.reject_in_isolation("`posix_fallocate`", reject_with)?;
        // Return error code "EBADF" (bad fd).
        return interp_ok(this.eval_libc("EBADF"));
    }
    // EINVAL is returned when: "offset was less than 0, or len was less than or equal to 0".
    if offset < 0 || len <= 0 {
        return interp_ok(this.eval_libc("EINVAL"));
    }
    // Get the file handle.
    let Some(fd) = this.machine.fds.get(fd_num) else {
        return interp_ok(this.eval_libc("EBADF"));
    };
    let file = match fd.downcast::<FileHandle>() {
        Some(file_handle) => file_handle,
        // Man page specifies to return ENODEV if `fd` is not a regular file.
        None => return interp_ok(this.eval_libc("ENODEV")),
    };
    if !file.writable {
        // The file is not writable; the man page says to report EBADF for that.
        return interp_ok(this.eval_libc("EBADF"));
    }
    let current_size = match file.file.metadata() {
        Ok(metadata) => metadata.len(),
        Err(err) => return this.io_error_to_errnum(err),
    };
    // Checked i64 addition, to ensure the result does not exceed the max file size.
    let new_size = match offset.checked_add(len) {
        // `new_size` is definitely non-negative (both summands are), so we can cast to `u64`.
        Some(new_size) => u64::try_from(new_size).unwrap(),
        None => return interp_ok(this.eval_libc("EFBIG")), // new size too big
    };
    // If the size of the file is less than offset+size, then the file is increased to this size;
    // otherwise the file size is left unchanged.
    if current_size < new_size {
        interp_ok(match file.file.set_len(new_size) {
            Ok(()) => Scalar::from_i32(0),
            Err(e) => this.io_error_to_errnum(e)?,
        })
    } else {
        interp_ok(Scalar::from_i32(0))
    }
}
fn fsync(&mut self, fd_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
// On macOS, `fsync` (unlike `fcntl(F_FULLFSYNC)`) does not wait for the
// underlying disk to finish writing. In the interest of host compatibility,

View file

@ -1,6 +1,7 @@
use std::cell::RefCell;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::io;
use std::ops::Bound;
use std::time::Duration;
use rustc_abi::FieldIdx;
@ -611,8 +612,12 @@ fn return_ready_list<'tcx>(
}
// While there is a slot to store another event, and an event to store, deliver that event.
// We can't use an iterator over `ready_set` as we want to remove elements as we go,
// so we track the most recently delivered event to find the next one. We track it as a lower
// bound that we can pass to `BTreeSet::range`.
let mut event_lower_bound = Bound::Unbounded;
while let Some(slot) = array_iter.next(ecx)?
&& let Some(&key) = ready_set.first()
&& let Some(&key) = ready_set.range((event_lower_bound, Bound::Unbounded)).next()
{
let interest = interest_list.get_mut(&key).expect("non-existent event in ready set");
// Deliver event to caller.
@ -623,9 +628,10 @@ fn return_ready_list<'tcx>(
num_of_events = num_of_events.strict_add(1);
// Synchronize receiving thread with the event of interest.
ecx.acquire_clock(&interest.clock)?;
// Since currently, all events are edge-triggered, we remove them from the ready set when
// they get delivered.
// This was an edge-triggered event, so remove it from the ready set.
ready_set.remove(&key);
// Go find the next event.
event_lower_bound = Bound::Excluded(key);
}
ecx.write_int(num_of_events, dest)?;
interp_ok(num_of_events)

View file

@ -6,7 +6,7 @@ use rustc_target::callconv::FnAbi;
use super::{
FloatBinOp, FloatUnaryOp, bin_op_simd_float_all, conditional_dot_product, convert_float_to_int,
mask_load, mask_store, round_all, test_bits_masked, test_high_bits_masked, unary_op_ps,
round_all, test_bits_masked, test_high_bits_masked, unary_op_ps,
};
use crate::*;
@ -200,27 +200,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
)?;
}
}
// Used to implement the _mm_maskload_ps, _mm_maskload_pd, _mm256_maskload_ps
// and _mm256_maskload_pd functions.
// For the element `i`, if the high bit of the `i`-th element of `mask`
// is one, it is loaded from `ptr.wrapping_add(i)`, otherwise zero is
// loaded.
"maskload.ps" | "maskload.pd" | "maskload.ps.256" | "maskload.pd.256" => {
let [ptr, mask] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
mask_load(this, ptr, mask, dest)?;
}
// Used to implement the _mm_maskstore_ps, _mm_maskstore_pd, _mm256_maskstore_ps
// and _mm256_maskstore_pd functions.
// For the element `i`, if the high bit of the element `i`-th of `mask`
// is one, it is stored into `ptr.wrapping_add(i)`.
// Unlike SSE2's _mm_maskmoveu_si128, these are not non-temporal stores.
"maskstore.ps" | "maskstore.pd" | "maskstore.ps.256" | "maskstore.pd.256" => {
let [ptr, mask, value] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
mask_store(this, ptr, mask, value)?;
}
// Used to implement the _mm256_lddqu_si256 function.
// Reads a 256-bit vector from an unaligned pointer. This intrinsic
// is expected to perform better than a regular unaligned read when

View file

@ -5,8 +5,8 @@ use rustc_span::Symbol;
use rustc_target::callconv::FnAbi;
use super::{
ShiftOp, horizontal_bin_op, mask_load, mask_store, mpsadbw, packssdw, packsswb, packusdw,
packuswb, pmulhrsw, psadbw, psign, shift_simd_by_scalar, shift_simd_by_simd,
ShiftOp, horizontal_bin_op, mpsadbw, packssdw, packsswb, packusdw, packuswb, permute, pmaddbw,
pmulhrsw, psadbw, psign, shift_simd_by_scalar,
};
use crate::*;
@ -102,60 +102,11 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
}
// Used to implement the _mm256_maddubs_epi16 function.
// Multiplies packed 8-bit unsigned integers from `left` and packed
// signed 8-bit integers from `right` into 16-bit signed integers. Then,
// the saturating sum of the products with indices `2*i` and `2*i+1`
// produces the output at index `i`.
"pmadd.ub.sw" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (left, left_len) = this.project_to_simd(left)?;
let (right, right_len) = this.project_to_simd(right)?;
let (dest, dest_len) = this.project_to_simd(dest)?;
assert_eq!(left_len, right_len);
assert_eq!(dest_len.strict_mul(2), left_len);
for i in 0..dest_len {
let j1 = i.strict_mul(2);
let left1 = this.read_scalar(&this.project_index(&left, j1)?)?.to_u8()?;
let right1 = this.read_scalar(&this.project_index(&right, j1)?)?.to_i8()?;
let j2 = j1.strict_add(1);
let left2 = this.read_scalar(&this.project_index(&left, j2)?)?.to_u8()?;
let right2 = this.read_scalar(&this.project_index(&right, j2)?)?.to_i8()?;
let dest = this.project_index(&dest, i)?;
// Multiplication of a u8 and an i8 into an i16 cannot overflow.
let mul1 = i16::from(left1).strict_mul(right1.into());
let mul2 = i16::from(left2).strict_mul(right2.into());
let res = mul1.saturating_add(mul2);
this.write_scalar(Scalar::from_i16(res), &dest)?;
}
}
// Used to implement the _mm_maskload_epi32, _mm_maskload_epi64,
// _mm256_maskload_epi32 and _mm256_maskload_epi64 functions.
// For the element `i`, if the high bit of the `i`-th element of `mask`
// is one, it is loaded from `ptr.wrapping_add(i)`, otherwise zero is
// loaded.
"maskload.d" | "maskload.q" | "maskload.d.256" | "maskload.q.256" => {
let [ptr, mask] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
mask_load(this, ptr, mask, dest)?;
}
// Used to implement the _mm_maskstore_epi32, _mm_maskstore_epi64,
// _mm256_maskstore_epi32 and _mm256_maskstore_epi64 functions.
// For the element `i`, if the high bit of the element `i`-th of `mask`
// is one, it is stored into `ptr.wrapping_add(i)`.
// Unlike SSE2's _mm_maskmoveu_si128, these are not non-temporal stores.
"maskstore.d" | "maskstore.q" | "maskstore.d.256" | "maskstore.q.256" => {
let [ptr, mask, value] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
mask_store(this, ptr, mask, value)?;
pmaddbw(this, left, right, dest)?;
}
// Used to implement the _mm256_mpsadbw_epu8 function.
// Compute the sum of absolute differences of quadruplets of unsigned
@ -217,28 +168,12 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
packusdw(this, left, right, dest)?;
}
// Used to implement the _mm256_permutevar8x32_epi32 and
// _mm256_permutevar8x32_ps function.
// Shuffles `left` using the three low bits of each element of `right`
// as indices.
// Used to implement _mm256_permutevar8x32_epi32 and _mm256_permutevar8x32_ps.
"permd" | "permps" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (left, left_len) = this.project_to_simd(left)?;
let (right, right_len) = this.project_to_simd(right)?;
let (dest, dest_len) = this.project_to_simd(dest)?;
assert_eq!(dest_len, left_len);
assert_eq!(dest_len, right_len);
for i in 0..dest_len {
let dest = this.project_index(&dest, i)?;
let right = this.read_scalar(&this.project_index(&right, i)?)?.to_u32()?;
let left = this.project_index(&left, (right & 0b111).into())?;
this.copy_op(&left, &dest)?;
}
permute(this, left, right, dest)?;
}
// Used to implement the _mm256_sad_epu8 function.
"psad.bw" => {
@ -310,22 +245,6 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
shift_simd_by_scalar(this, left, right, which, dest)?;
}
// Used to implement the _mm{,256}_{sllv,srlv,srav}_epi{32,64} functions
// (except _mm{,256}_srav_epi64, which are not available in AVX2).
"psllv.d" | "psllv.d.256" | "psllv.q" | "psllv.q.256" | "psrlv.d" | "psrlv.d.256"
| "psrlv.q" | "psrlv.q.256" | "psrav.d" | "psrav.d.256" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let which = match unprefixed_name {
"psllv.d" | "psllv.d.256" | "psllv.q" | "psllv.q.256" => ShiftOp::Left,
"psrlv.d" | "psrlv.d.256" | "psrlv.q" | "psrlv.q.256" => ShiftOp::RightLogic,
"psrav.d" | "psrav.d.256" => ShiftOp::RightArith,
_ => unreachable!(),
};
shift_simd_by_simd(this, left, right, which, dest)?;
}
_ => return interp_ok(EmulateItemResult::NotSupported),
}
interp_ok(EmulateItemResult::NeedsReturn)

View file

@ -3,7 +3,7 @@ use rustc_middle::ty::Ty;
use rustc_span::Symbol;
use rustc_target::callconv::FnAbi;
use super::psadbw;
use super::{permute, pmaddbw, psadbw};
use crate::*;
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
@ -88,6 +88,20 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
psadbw(this, left, right, dest)?
}
// Used to implement the _mm512_maddubs_epi16 function.
"pmaddubs.w.512" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
pmaddbw(this, left, right, dest)?;
}
// Used to implement the _mm512_permutexvar_epi32 function.
"permvar.si.512" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
permute(this, left, right, dest)?;
}
_ => return interp_ok(EmulateItemResult::NotSupported),
}
interp_ok(EmulateItemResult::NeedsReturn)

View file

@ -518,61 +518,6 @@ fn shift_simd_by_scalar<'tcx>(
interp_ok(())
}
/// Shifts each element of `left` by the corresponding element of `right`.
///
/// For logic shifts, when right is larger than BITS - 1, zero is produced.
/// For arithmetic right-shifts, when right is larger than BITS - 1, the sign
/// bit is copied to all bits.
fn shift_simd_by_simd<'tcx>(
    ecx: &mut crate::MiriInterpCx<'tcx>,
    left: &OpTy<'tcx>,
    right: &OpTy<'tcx>,
    which: ShiftOp,
    dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, ()> {
    let (left, left_len) = ecx.project_to_simd(left)?;
    let (right, right_len) = ecx.project_to_simd(right)?;
    let (dest, dest_len) = ecx.project_to_simd(dest)?;
    // All three vectors must have the same number of lanes.
    assert_eq!(dest_len, left_len);
    assert_eq!(dest_len, right_len);
    for i in 0..dest_len {
        let left = ecx.read_scalar(&ecx.project_index(&left, i)?)?;
        let right = ecx.read_scalar(&ecx.project_index(&right, i)?)?;
        let dest = ecx.project_index(&dest, i)?;
        // It is ok to saturate the value to u32::MAX because any value
        // above BITS - 1 will produce the same result.
        let shift = u32::try_from(right.to_uint(dest.layout.size)?).unwrap_or(u32::MAX);
        let res = match which {
            ShiftOp::Left => {
                let left = left.to_uint(dest.layout.size)?;
                // An overflowing shift amount yields 0.
                let res = left.checked_shl(shift).unwrap_or(0);
                // `truncate` is needed as left-shift can make the absolute value larger.
                Scalar::from_uint(dest.layout.size.truncate(res), dest.layout.size)
            }
            ShiftOp::RightLogic => {
                let left = left.to_uint(dest.layout.size)?;
                // An overflowing shift amount yields 0.
                let res = left.checked_shr(shift).unwrap_or(0);
                // No `truncate` needed as right-shift can only make the absolute value smaller.
                Scalar::from_uint(res, dest.layout.size)
            }
            ShiftOp::RightArith => {
                let left = left.to_int(dest.layout.size)?;
                // On overflow, copy the sign bit to the remaining bits
                // (`>> 127` on the 128-bit value leaves only the sign bit replicated).
                let res = left.checked_shr(shift).unwrap_or(left >> 127);
                // No `truncate` needed as right-shift can only make the absolute value smaller.
                Scalar::from_int(res, dest.layout.size)
            }
        };
        ecx.write_scalar(res, &dest)?;
    }
    interp_ok(())
}
/// Takes a 128-bit vector, transmutes it to `[u64; 2]` and extracts
/// the first value.
fn extract_first_u64<'tcx>(
@ -912,73 +857,6 @@ fn test_high_bits_masked<'tcx>(
interp_ok((direct, negated))
}
/// Conditionally loads from `ptr` according to the high bit of each
/// element of `mask`: a lane whose mask high bit is set is read from
/// `ptr + i * lane_size`, all other lanes are set to zero.
/// `ptr` does not need to be aligned.
fn mask_load<'tcx>(
    ecx: &mut crate::MiriInterpCx<'tcx>,
    ptr: &OpTy<'tcx>,
    mask: &OpTy<'tcx>,
    dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, ()> {
    let (mask, mask_len) = ecx.project_to_simd(mask)?;
    let (dest, dest_len) = ecx.project_to_simd(dest)?;
    assert_eq!(dest_len, mask_len);
    // The selection bit is the MSB of each mask element.
    let mask_item_size = mask.layout.field(ecx, 0).size;
    let high_bit_offset = mask_item_size.bits().strict_sub(1);
    let ptr = ecx.read_pointer(ptr)?;
    for i in 0..dest_len {
        let mask = ecx.project_index(&mask, i)?;
        let dest = ecx.project_index(&dest, i)?;
        if ecx.read_scalar(&mask)?.to_uint(mask_item_size)? >> high_bit_offset != 0 {
            // *Non-inbounds* pointer arithmetic to compute the source address.
            let ptr = ptr.wrapping_offset(dest.layout.size * i, &ecx.tcx);
            // Unaligned copy, which is what we want.
            ecx.mem_copy(ptr, dest.ptr(), dest.layout.size, /*nonoverlapping*/ true)?;
        } else {
            // Masked-out lanes are zeroed rather than loaded.
            ecx.write_scalar(Scalar::from_int(0, dest.layout.size), &dest)?;
        }
    }
    interp_ok(())
}
/// Conditionally stores into `ptr` according to the high bit of each
/// element of `mask`: only lanes whose mask high bit is set are written
/// to `ptr + i * lane_size`; other lanes leave memory untouched.
/// `ptr` does not need to be aligned.
fn mask_store<'tcx>(
    ecx: &mut crate::MiriInterpCx<'tcx>,
    ptr: &OpTy<'tcx>,
    mask: &OpTy<'tcx>,
    value: &OpTy<'tcx>,
) -> InterpResult<'tcx, ()> {
    let (mask, mask_len) = ecx.project_to_simd(mask)?;
    let (value, value_len) = ecx.project_to_simd(value)?;
    assert_eq!(value_len, mask_len);
    // The selection bit is the MSB of each mask element.
    let mask_item_size = mask.layout.field(ecx, 0).size;
    let high_bit_offset = mask_item_size.bits().strict_sub(1);
    let ptr = ecx.read_pointer(ptr)?;
    for i in 0..value_len {
        let mask = ecx.project_index(&mask, i)?;
        let value = ecx.project_index(&value, i)?;
        if ecx.read_scalar(&mask)?.to_uint(mask_item_size)? >> high_bit_offset != 0 {
            // *Non-inbounds* pointer arithmetic to compute the destination.
            // (That's why we can't use a place projection.)
            let ptr = ptr.wrapping_offset(value.layout.size * i, &ecx.tcx);
            // Deref the pointer *unaligned*, and do the copy.
            let dest = ecx.ptr_to_mplace_unaligned(ptr, value.layout);
            ecx.copy_op(&value, &dest)?;
        }
    }
    interp_ok(())
}
/// Compute the sum of absolute differences of quadruplets of unsigned
/// 8-bit integers in `left` and `right`, and store the 16-bit results
/// in `right`. Quadruplets are selected from `left` and `right` with
@ -1086,6 +964,90 @@ fn psadbw<'tcx>(
interp_ok(())
}
/// Vertically multiplies unsigned 8-bit lanes of `left` with signed 8-bit
/// lanes of `right` into 16-bit products, then writes the saturating sum of
/// each adjacent product pair (lanes `2*i` and `2*i+1`) into lane `i` of `dest`.
///
/// <https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_epi16>
/// <https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maddubs_epi16>
/// <https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maddubs_epi16>
fn pmaddbw<'tcx>(
    ecx: &mut crate::MiriInterpCx<'tcx>,
    left: &OpTy<'tcx>,
    right: &OpTy<'tcx>,
    dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, ()> {
    let (left, left_len) = ecx.project_to_simd(left)?;
    let (right, right_len) = ecx.project_to_simd(right)?;
    let (dest, dest_len) = ecx.project_to_simd(dest)?;
    // The inputs have twice as many (half-width) lanes as the output.
    assert_eq!(left_len, right_len);
    assert_eq!(dest_len.strict_mul(2), left_len);
    for out_idx in 0..dest_len {
        // Each output lane consumes input lanes `lo` and `hi`.
        let lo = out_idx.strict_mul(2);
        let hi = lo.strict_add(1);
        let a_lo = ecx.read_scalar(&ecx.project_index(&left, lo)?)?.to_u8()?;
        let b_lo = ecx.read_scalar(&ecx.project_index(&right, lo)?)?.to_i8()?;
        let a_hi = ecx.read_scalar(&ecx.project_index(&left, hi)?)?.to_u8()?;
        let b_hi = ecx.read_scalar(&ecx.project_index(&right, hi)?)?.to_i8()?;
        // A u8 times an i8 always fits in an i16, so `strict_mul` cannot panic.
        let prod_lo = i16::from(a_lo).strict_mul(i16::from(b_lo));
        let prod_hi = i16::from(a_hi).strict_mul(i16::from(b_hi));
        let out = ecx.project_index(&dest, out_idx)?;
        ecx.write_scalar(Scalar::from_i16(prod_lo.saturating_add(prod_hi)), &out)?;
    }
    interp_ok(())
}
/// Shuffles the 32-bit lanes of `values` across the whole vector: output lane
/// `i` is the lane of `values` selected by the low bits of `indices[i]`
/// (3 bits for 8 lanes, 4 bits for 16 lanes — any higher bits are ignored).
///
/// <https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutevar8x32_epi32>
/// <https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutevar8x32_ps>
/// <https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_epi32>
fn permute<'tcx>(
    ecx: &mut crate::MiriInterpCx<'tcx>,
    values: &OpTy<'tcx>,
    indices: &OpTy<'tcx>,
    dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, ()> {
    let (src, src_len) = ecx.project_to_simd(values)?;
    let (idx, idx_len) = ecx.project_to_simd(indices)?;
    let (out, out_len) = ecx.project_to_simd(dest)?;
    assert_eq!(out_len, src_len);
    assert_eq!(out_len, idx_len);
    // The lane count is a power of two, so masking with `len - 1` keeps
    // exactly the index bits that select a valid lane.
    assert!(out_len.is_power_of_two());
    let lane_mask = u32::try_from(out_len).unwrap().strict_sub(1);
    for lane in 0..out_len {
        let selector = ecx.read_scalar(&ecx.project_index(&idx, lane)?)?.to_u32()?;
        let chosen = ecx.project_index(&src, (selector & lane_mask).into())?;
        let out_lane = ecx.project_index(&out, lane)?;
        ecx.copy_op(&chosen, &out_lane)?;
    }
    interp_ok(())
}
/// Multiplies packed 16-bit signed integer values, truncates the 32-bit
/// product to the 18 most significant bits by right-shifting, and then
/// divides the 18-bit value by 2 (rounding to nearest) by first adding

View file

@ -4,7 +4,7 @@ use rustc_middle::ty::Ty;
use rustc_span::Symbol;
use rustc_target::callconv::FnAbi;
use super::{horizontal_bin_op, pmulhrsw, psign};
use super::{horizontal_bin_op, pmaddbw, pmulhrsw, psign};
use crate::*;
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
@ -67,40 +67,11 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
horizontal_bin_op(this, which, /*saturating*/ true, left, right, dest)?;
}
// Used to implement the _mm_maddubs_epi16 function.
// Multiplies packed 8-bit unsigned integers from `left` and packed
// signed 8-bit integers from `right` into 16-bit signed integers. Then,
// the saturating sum of the products with indices `2*i` and `2*i+1`
// produces the output at index `i`.
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_epi16
"pmadd.ub.sw.128" => {
let [left, right] =
this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?;
let (left, left_len) = this.project_to_simd(left)?;
let (right, right_len) = this.project_to_simd(right)?;
let (dest, dest_len) = this.project_to_simd(dest)?;
assert_eq!(left_len, right_len);
assert_eq!(dest_len.strict_mul(2), left_len);
for i in 0..dest_len {
let j1 = i.strict_mul(2);
let left1 = this.read_scalar(&this.project_index(&left, j1)?)?.to_u8()?;
let right1 = this.read_scalar(&this.project_index(&right, j1)?)?.to_i8()?;
let j2 = j1.strict_add(1);
let left2 = this.read_scalar(&this.project_index(&left, j2)?)?.to_u8()?;
let right2 = this.read_scalar(&this.project_index(&right, j2)?)?.to_i8()?;
let dest = this.project_index(&dest, i)?;
// Multiplication of a u8 and an i8 into an i16 cannot overflow.
let mul1 = i16::from(left1).strict_mul(right1.into());
let mul2 = i16::from(left2).strict_mul(right2.into());
let res = mul1.saturating_add(mul2);
this.write_scalar(Scalar::from_i16(res), &dest)?;
}
pmaddbw(this, left, right, dest)?;
}
// Used to implement the _mm_mulhrs_epi16 function.
// Multiplies packed 16-bit signed integer values, truncates the 32-bit

View file

@ -0,0 +1,26 @@
Running GenMC Verification...
error: resource exhaustion: there are no more free addresses in the address space
--> RUSTLIB/alloc/src/raw_vec/mod.rs:LL:CC
|
LL | AllocInit::Uninitialized => alloc.allocate(layout),
| ^^^^^^^^^^^^^^^^^^^^^^ resource exhaustion occurred here
|
= help: in GenMC mode, the address space is limited to 4GB per thread, and addresses cannot be reused
= note: BACKTRACE:
= note: inside `alloc::raw_vec::RawVecInner::try_allocate_in` at RUSTLIB/alloc/src/raw_vec/mod.rs:LL:CC
= note: inside `alloc::raw_vec::RawVecInner::with_capacity_in` at RUSTLIB/alloc/src/raw_vec/mod.rs:LL:CC
= note: inside `alloc::raw_vec::RawVec::<u8>::with_capacity_in` at RUSTLIB/alloc/src/raw_vec/mod.rs:LL:CC
= note: inside `std::vec::Vec::<u8>::with_capacity_in` at RUSTLIB/alloc/src/vec/mod.rs:LL:CC
= note: inside `std::vec::Vec::<u8>::with_capacity` at RUSTLIB/alloc/src/vec/mod.rs:LL:CC
note: inside `miri_start`
--> tests/genmc/fail/simple/alloc_large.rs:LL:CC
|
LL | let _v = Vec::<u8>::with_capacity(1024 * 1024 * 1024);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
note: add `-Zmiri-genmc-print-genmc-output` to MIRIFLAGS to see the detailed GenMC error report
error: aborting due to 1 previous error

View file

@ -0,0 +1,24 @@
//@revisions: single multiple
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@error-in-other-file: resource exhaustion
// Ensure that we emit a proper error if GenMC fails to fulfill an allocation.
// Two variants: one for a single large allocation, one for multiple ones
// that are individually below the limit, but together are too big.
#![no_main]
#[path = "../../../utils/genmc.rs"]
mod genmc;
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    const GIB: usize = 1024 * 1024 * 1024;
    if cfg!(multiple) {
        // Seven 1-GiB allocations: each fits on its own, but since GenMC never
        // reuses addresses their sum exhausts the 4GB-per-thread address space.
        for _ in 0..7 {
            let _buf = Vec::<u8>::with_capacity(GIB);
        }
    } else {
        // A single allocation that by itself exceeds the address-space limit.
        let _buf = Vec::<u8>::with_capacity(8 * GIB);
    }
    0
}

View file

@ -0,0 +1,26 @@
Running GenMC Verification...
error: resource exhaustion: there are no more free addresses in the address space
--> RUSTLIB/alloc/src/raw_vec/mod.rs:LL:CC
|
LL | AllocInit::Uninitialized => alloc.allocate(layout),
| ^^^^^^^^^^^^^^^^^^^^^^ resource exhaustion occurred here
|
= help: in GenMC mode, the address space is limited to 4GB per thread, and addresses cannot be reused
= note: BACKTRACE:
= note: inside `alloc::raw_vec::RawVecInner::try_allocate_in` at RUSTLIB/alloc/src/raw_vec/mod.rs:LL:CC
= note: inside `alloc::raw_vec::RawVecInner::with_capacity_in` at RUSTLIB/alloc/src/raw_vec/mod.rs:LL:CC
= note: inside `alloc::raw_vec::RawVec::<u8>::with_capacity_in` at RUSTLIB/alloc/src/raw_vec/mod.rs:LL:CC
= note: inside `std::vec::Vec::<u8>::with_capacity_in` at RUSTLIB/alloc/src/vec/mod.rs:LL:CC
= note: inside `std::vec::Vec::<u8>::with_capacity` at RUSTLIB/alloc/src/vec/mod.rs:LL:CC
note: inside `miri_start`
--> tests/genmc/fail/simple/alloc_large.rs:LL:CC
|
LL | let _v = Vec::<u8>::with_capacity(8 * 1024 * 1024 * 1024);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
note: add `-Zmiri-genmc-print-genmc-output` to MIRIFLAGS to see the detailed GenMC error report
error: aborting due to 1 previous error

View file

@ -67,7 +67,7 @@ macro_rules! test_rmw_edge_cases {
x.store(10, ORD);
assert_eq(10, x.fetch_add(<$int>::MAX, ORD)); // definitely overflows, so new value of x is smaller than 10
assert_eq(<$int>::MAX.wrapping_add(10), x.fetch_max(10, ORD)); // new value of x should be 10
// assert_eq(10, x.load(ORD)); // FIXME(genmc,#4572): enable this check once GenMC correctly handles min/max truncation.
assert_eq(10, x.load(ORD));
}};
}

View file

@ -0,0 +1,90 @@
//@ignore-target: windows # No libc
//@compile-flags: -Zmiri-disable-isolation
use std::ffi::CStr;
/// `getenv` must see the variable preset by the test harness, return null for
/// unknown names, and tolerate an empty name.
fn test_getenv() {
    // The harness sets MIRI_ENV_VAR_TEST=0; it should be readable.
    let ptr = unsafe { libc::getenv(c"MIRI_ENV_VAR_TEST".as_ptr()) };
    assert!(!ptr.is_null());
    assert_eq!(unsafe { CStr::from_ptr(ptr).to_str().unwrap() }, "0");
    // A name that was never set yields null.
    assert!(unsafe { libc::getenv(c"MIRI_TEST_NONEXISTENT_VAR".as_ptr()) }.is_null());
    // An empty name should not crash; it is simply an absent variable.
    assert!(unsafe { libc::getenv(c"".as_ptr()) }.is_null());
}
/// Exercise `libc::setenv`: creating a variable, overwriting it, and the
/// rejection of invalid names (NULL, empty, or containing '=').
fn test_setenv() {
    // Helper: read a variable that must exist and return its value.
    let getenv_str = |name: &CStr| unsafe {
        let ptr = libc::getenv(name.as_ptr());
        assert!(!ptr.is_null());
        CStr::from_ptr(ptr).to_str().unwrap().to_owned()
    };

    // Creating a fresh variable succeeds...
    let ret = unsafe { libc::setenv(c"MIRI_TEST_VAR".as_ptr(), c"test_value".as_ptr(), 1) };
    assert_eq!(ret, 0);
    // ...and it is immediately visible.
    assert_eq!(getenv_str(c"MIRI_TEST_VAR"), "test_value");

    // With the overwrite flag set, the old value is replaced.
    let ret = unsafe { libc::setenv(c"MIRI_TEST_VAR".as_ptr(), c"new_value".as_ptr(), 1) };
    assert_eq!(ret, 0);
    assert_eq!(getenv_str(c"MIRI_TEST_VAR"), "new_value");

    // Invalid names are rejected with -1.
    assert_eq!(unsafe { libc::setenv(std::ptr::null(), c"value".as_ptr(), 1) }, -1);
    assert_eq!(unsafe { libc::setenv(c"".as_ptr(), c"value".as_ptr(), 1) }, -1);
    assert_eq!(unsafe { libc::setenv(c"INVALID=NAME".as_ptr(), c"value".as_ptr(), 1) }, -1);
}
/// Exercise `libc::unsetenv`: removing an existing variable, the no-op removal
/// of a non-existing variable, and rejection of invalid names.
fn test_unsetenv() {
    let name = c"MIRI_TEST_UNSET_VAR";

    // Create a variable so there is something to remove, and confirm it exists.
    assert_eq!(unsafe { libc::setenv(name.as_ptr(), c"to_be_unset".as_ptr(), 1) }, 0);
    assert!(!unsafe { libc::getenv(name.as_ptr()) }.is_null());

    // Removing it succeeds, and it is gone afterwards.
    assert_eq!(unsafe { libc::unsetenv(name.as_ptr()) }, 0);
    assert!(unsafe { libc::getenv(name.as_ptr()) }.is_null());

    // Unsetting a variable that does not exist is not an error.
    assert_eq!(unsafe { libc::unsetenv(c"MIRI_TEST_NONEXISTENT_VAR".as_ptr()) }, 0);

    // Invalid names (NULL, empty, containing '=') are rejected with -1.
    assert_eq!(unsafe { libc::unsetenv(std::ptr::null()) }, -1);
    assert_eq!(unsafe { libc::unsetenv(c"".as_ptr()) }, -1);
    assert_eq!(unsafe { libc::unsetenv(c"INVALID=NAME".as_ptr()) }, -1);
}
// Entry point: run the environment-variable tests.
// NOTE: `test_getenv` asserts that `MIRI_ENV_VAR_TEST` is already set to "0",
// so it must be provided by the environment this test runs in.
fn main() {
    test_getenv();
    test_setenv();
    test_unsetenv();
}

View file

@ -4,6 +4,8 @@ use std::convert::TryInto;
#[path = "../../utils/libc.rs"]
mod libc_utils;
use libc_utils::epoll::*;
use libc_utils::*;
fn main() {
test_epoll_socketpair();
@ -51,56 +53,41 @@ fn check_epoll_wait<const N: usize>(epfd: i32, expected_notifications: &[(u32, u
fn test_epoll_socketpair() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
let fds = [fds[1], fds[0]];
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Write to fd[0]
let data = "abcde".as_bytes().as_ptr();
let res = unsafe { libc_utils::write_all(fds[0], data as *const libc::c_void, 5) };
assert_eq!(res, 5);
write_all_from_slice(fds[0], "abcde".as_bytes()).unwrap();
// Register fd[1] with EPOLLIN|EPOLLOUT|EPOLLET|EPOLLRDHUP
let mut ev = libc::epoll_event {
events: (libc::EPOLLIN | libc::EPOLLOUT | libc::EPOLLET | libc::EPOLLRDHUP) as _,
u64: u64::try_from(fds[1]).unwrap(),
};
let res = unsafe { libc::epoll_ctl(epfd, libc::EPOLL_CTL_ADD, fds[1], &mut ev) };
assert_eq!(res, 0);
epoll_ctl_add(epfd, fds[1], EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP).unwrap();
// Check result from epoll_wait.
let expected_event = u32::try_from(libc::EPOLLIN | libc::EPOLLOUT).unwrap();
let expected_value = u64::try_from(fds[1]).unwrap();
check_epoll_wait::<8>(epfd, &[(expected_event, expected_value)]);
check_epoll_wait_noblock::<8>(epfd, &[Ev { data: fds[1], events: EPOLLIN | EPOLLOUT }]);
// Check that this is indeed using "ET" (edge-trigger) semantics: a second epoll should return nothing.
check_epoll_wait::<8>(epfd, &[]);
check_epoll_wait_noblock::<8>(epfd, &[]);
// Write some more to fd[0].
let data = "abcde".as_bytes().as_ptr();
let res = unsafe { libc_utils::write_all(fds[0], data as *const libc::c_void, 5) };
assert_eq!(res, 5);
write_all_from_slice(fds[0], "abcde".as_bytes()).unwrap();
// This did not change the readiness of fd[1], so we should get no event.
// However, Linux seems to always deliver spurious events to the peer on each write,
// so we match that.
check_epoll_wait::<8>(epfd, &[(expected_event, expected_value)]);
check_epoll_wait_noblock::<8>(epfd, &[Ev { data: fds[1], events: EPOLLIN | EPOLLOUT }]);
// Close the peer socketpair.
let res = unsafe { libc::close(fds[0]) };
assert_eq!(res, 0);
errno_check(unsafe { libc::close(fds[0]) });
// Check result from epoll_wait.
// We expect to get a read, write, HUP notification from the close since closing an FD always unblocks reads and writes on its peer.
let expected_event =
u32::try_from(libc::EPOLLIN | libc::EPOLLOUT | libc::EPOLLHUP | libc::EPOLLRDHUP).unwrap();
let expected_value = u64::try_from(fds[1]).unwrap();
check_epoll_wait::<8>(epfd, &[(expected_event, expected_value)]);
// Check result from epoll_wait. We expect to get a read, write, HUP notification from the close
// since closing an FD always unblocks reads and writes on its peer.
check_epoll_wait_noblock::<8>(
epfd,
&[Ev { data: fds[1], events: EPOLLIN | EPOLLOUT | EPOLLHUP | EPOLLRDHUP }],
);
}
// This test first registers a file description with a flag that does not lead to notification,
@ -108,69 +95,49 @@ fn test_epoll_socketpair() {
// Also check that the new data value set via MOD is applied properly.
fn test_epoll_ctl_mod() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Register fd[1] with EPOLLIN|EPOLLET, and data of "0".
let mut ev = libc::epoll_event { events: (libc::EPOLLIN | libc::EPOLLET) as _, u64: 0 };
let res = unsafe { libc::epoll_ctl(epfd, libc::EPOLL_CTL_ADD, fds[1], &mut ev) };
assert_eq!(res, 0);
epoll_ctl(epfd, EPOLL_CTL_ADD, fds[1], Ev { events: EPOLLIN | EPOLLET, data: 0 }).unwrap();
// Check result from epoll_wait. No notification would be returned.
check_epoll_wait::<8>(epfd, &[]);
check_epoll_wait_noblock::<8>(epfd, &[]);
// Use EPOLL_CTL_MOD to change to EPOLLOUT flag and data.
let mut ev = libc::epoll_event { events: (libc::EPOLLOUT | libc::EPOLLET) as _, u64: 1 };
let res = unsafe { libc::epoll_ctl(epfd, libc::EPOLL_CTL_MOD, fds[1], &mut ev) };
assert_eq!(res, 0);
epoll_ctl(epfd, EPOLL_CTL_MOD, fds[1], Ev { events: EPOLLOUT | EPOLLET, data: 1 }).unwrap();
// Check result from epoll_wait. EPOLLOUT notification and new data is expected.
let expected_event = u32::try_from(libc::EPOLLOUT).unwrap();
let expected_value = 1;
check_epoll_wait::<8>(epfd, &[(expected_event, expected_value)]);
check_epoll_wait_noblock::<8>(epfd, &[Ev { events: EPOLLOUT, data: 1 }]);
// Write to fds[1] and read from fds[0] to make the notification ready again
// (relying on there always being an event when the buffer gets emptied).
let data = "abc".as_bytes();
let res = unsafe { libc_utils::write_all(fds[1], data.as_ptr().cast(), data.len()) };
assert_eq!(res, 3);
let mut buf = [0u8; 3];
let res = unsafe { libc_utils::read_all(fds[0], buf.as_mut_ptr().cast(), buf.len()) };
assert_eq!(res, 3);
write_all_from_slice(fds[1], "abc".as_bytes()).unwrap();
read_all_into_array::<3>(fds[0]).unwrap();
// Now that the event is already ready, change the "data" value.
let mut ev = libc::epoll_event { events: (libc::EPOLLOUT | libc::EPOLLET) as _, u64: 2 };
let res = unsafe { libc::epoll_ctl(epfd, libc::EPOLL_CTL_MOD, fds[1], &mut ev) };
assert_eq!(res, 0);
epoll_ctl(epfd, EPOLL_CTL_MOD, fds[1], Ev { events: EPOLLOUT | EPOLLET, data: 2 }).unwrap();
// Receive event, with latest data value.
let expected_event = u32::try_from(libc::EPOLLOUT).unwrap();
let expected_value = 2;
check_epoll_wait::<8>(epfd, &[(expected_event, expected_value)]);
check_epoll_wait_noblock::<8>(epfd, &[Ev { events: EPOLLOUT, data: 2 }]);
// Do another update that changes nothing.
let mut ev = libc::epoll_event { events: (libc::EPOLLOUT | libc::EPOLLET) as _, u64: 2 };
let res = unsafe { libc::epoll_ctl(epfd, libc::EPOLL_CTL_MOD, fds[1], &mut ev) };
assert_eq!(res, 0);
epoll_ctl(epfd, EPOLL_CTL_MOD, fds[1], Ev { events: EPOLLOUT | EPOLLET, data: 2 }).unwrap();
// This re-triggers the event, even if it's the same flags as before.
check_epoll_wait::<8>(epfd, &[(expected_event, expected_value)]);
check_epoll_wait_noblock::<8>(epfd, &[Ev { events: EPOLLOUT, data: 2 }]);
}
fn test_epoll_ctl_del() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Write to fd[0]
let data = "abcde".as_bytes().as_ptr();
@ -198,8 +165,7 @@ fn test_two_epoll_instance() {
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Write to the socketpair.
let data = "abcde".as_bytes().as_ptr();
@ -224,13 +190,11 @@ fn test_two_epoll_instance() {
// Notification should be provided for both.
fn test_two_same_fd_in_same_epoll_instance() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Dup the fd.
let newfd = unsafe { libc::dup(fds[1]) };
@ -260,14 +224,13 @@ fn test_two_same_fd_in_same_epoll_instance() {
fn test_epoll_eventfd() {
// Create an eventfd instance.
let flags = libc::EFD_NONBLOCK | libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
let fd = errno_result(unsafe { libc::eventfd(0, flags) }).unwrap();
// Write 1 to the eventfd instance.
libc_utils::write_all_from_slice(fd, &1_u64.to_ne_bytes()).unwrap();
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Register eventfd with EPOLLIN | EPOLLOUT | EPOLLET
let mut ev = libc::epoll_event { events: EPOLL_IN_OUT_ET, u64: u64::try_from(fd).unwrap() };
@ -308,13 +271,11 @@ fn test_epoll_eventfd() {
// When read/write happened on one side of the socketpair, only the other side will be notified.
fn test_epoll_socketpair_both_sides() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Register both fd to the same epoll instance.
let mut ev = libc::epoll_event { events: EPOLL_IN_OUT_ET, u64: fds[0] as u64 };
@ -358,12 +319,11 @@ fn test_epoll_socketpair_both_sides() {
// that file description.
fn test_closed_fd() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create an eventfd instance.
let flags = libc::EFD_NONBLOCK | libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
let fd = errno_result(unsafe { libc::eventfd(0, flags) }).unwrap();
// Register eventfd with EPOLLIN | EPOLLOUT | EPOLLET
let mut ev = libc::epoll_event { events: EPOLL_IN_OUT_ET, u64: u64::try_from(fd).unwrap() };
@ -376,8 +336,7 @@ fn test_closed_fd() {
assert_eq!(res, 8);
// Close the eventfd.
let res = unsafe { libc::close(fd) };
assert_eq!(res, 0);
errno_check(unsafe { libc::close(fd) });
// No notification should be provided because the file description is closed.
check_epoll_wait::<8>(epfd, &[]);
@ -391,16 +350,14 @@ fn test_closed_fd() {
// referring to the underlying open file description have been closed.
fn test_not_fully_closed_fd() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create an eventfd instance.
let flags = libc::EFD_NONBLOCK | libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
let fd =
errno_result(unsafe { libc::eventfd(0, libc::EFD_NONBLOCK | libc::EFD_CLOEXEC) }).unwrap();
// Dup the fd.
let newfd = unsafe { libc::dup(fd) };
assert_ne!(newfd, -1);
let newfd = errno_result(unsafe { libc::dup(fd) }).unwrap();
// Register eventfd with EPOLLIN | EPOLLOUT | EPOLLET
let mut ev = libc::epoll_event { events: EPOLL_IN_OUT_ET, u64: u64::try_from(fd).unwrap() };
@ -408,8 +365,7 @@ fn test_not_fully_closed_fd() {
assert_eq!(res, 0);
// Close the original fd that being used to register with epoll.
let res = unsafe { libc::close(fd) };
assert_eq!(res, 0);
errno_check(unsafe { libc::close(fd) });
// Notification should still be provided because the file description is not closed.
let expected_event = u32::try_from(libc::EPOLLOUT).unwrap();
@ -423,8 +379,7 @@ fn test_not_fully_closed_fd() {
assert_eq!(res, 8);
// Close the dupped fd.
let res = unsafe { libc::close(newfd) };
assert_eq!(res, 0);
errno_check(unsafe { libc::close(newfd) });
// No notification should be provided.
check_epoll_wait::<1>(epfd, &[]);
@ -434,8 +389,8 @@ fn test_not_fully_closed_fd() {
// at the moment the latest event occurred.
fn test_event_overwrite() {
// Create an eventfd instance.
let flags = libc::EFD_NONBLOCK | libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
let fd =
errno_result(unsafe { libc::eventfd(0, libc::EFD_NONBLOCK | libc::EFD_CLOEXEC) }).unwrap();
// Write to the eventfd instance.
let sized_8_data: [u8; 8] = 1_u64.to_ne_bytes();
@ -443,8 +398,7 @@ fn test_event_overwrite() {
assert_eq!(res, 8);
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Register eventfd with EPOLLIN | EPOLLOUT | EPOLLET
let mut ev = libc::epoll_event {
@ -469,13 +423,11 @@ fn test_event_overwrite() {
// This behaviour differs from the real system.
fn test_socketpair_read() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Register both fd to the same epoll instance.
let mut ev = libc::epoll_event {
@ -533,13 +485,11 @@ fn test_socketpair_read() {
// This is to test whether flag that we don't register won't trigger notification.
fn test_no_notification_for_unregister_flag() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Register fd[0] with EPOLLOUT|EPOLLET.
let mut ev = libc::epoll_event {
@ -565,8 +515,7 @@ fn test_no_notification_for_unregister_flag() {
fn test_epoll_wait_maxevent_zero() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// It is ok to use a dangling pointer here because it will error out before the
// pointer actually gets accessed.
let array_ptr = std::ptr::without_provenance_mut::<libc::epoll_event>(0x100);
@ -578,13 +527,11 @@ fn test_epoll_wait_maxevent_zero() {
fn test_socketpair_epollerr() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Write to fd[0]
let data = "abcde".as_bytes().as_ptr();
@ -593,8 +540,7 @@ fn test_socketpair_epollerr() {
// Close fds[1].
// EPOLLERR will be triggered if we close peer fd that still has data in its read buffer.
let res = unsafe { libc::close(fds[1]) };
assert_eq!(res, 0);
errno_check(unsafe { libc::close(fds[1]) });
// Register fd[1] with EPOLLIN|EPOLLOUT|EPOLLET|EPOLLRDHUP
let mut ev = libc::epoll_event {
@ -617,13 +563,11 @@ fn test_socketpair_epollerr() {
// epoll can lose events if they don't fit in the output buffer.
fn test_epoll_lost_events() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Register both fd to the same epoll instance.
let mut ev = libc::epoll_event { events: EPOLL_IN_OUT_ET, u64: fds[0] as u64 };
@ -649,13 +593,12 @@ fn test_epoll_lost_events() {
// Related discussion in https://github.com/rust-lang/miri/pull/3818#discussion_r1720679440.
fn test_ready_list_fetching_logic() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Create two eventfd instances.
let flags = libc::EFD_NONBLOCK | libc::EFD_CLOEXEC;
let fd0 = unsafe { libc::eventfd(0, flags) };
let fd1 = unsafe { libc::eventfd(0, flags) };
let fd0 = errno_result(unsafe { libc::eventfd(0, flags) }).unwrap();
let fd1 = errno_result(unsafe { libc::eventfd(0, flags) }).unwrap();
// Register both fd to the same epoll instance. At this point, both of them are on the ready list.
let mut ev = libc::epoll_event { events: EPOLL_IN_OUT_ET, u64: fd0 as u64 };
@ -666,8 +609,7 @@ fn test_ready_list_fetching_logic() {
assert_eq!(res, 0);
// Close fd0 so the first entry in the ready list will be empty.
let res = unsafe { libc::close(fd0) };
assert_eq!(res, 0);
errno_check(unsafe { libc::close(fd0) });
// Notification for fd1 should be returned.
let expected_event1 = u32::try_from(libc::EPOLLOUT).unwrap();
@ -679,8 +621,7 @@ fn test_ready_list_fetching_logic() {
// (The docs say loops cause EINVAL, but experiments show it is EFAULT.)
fn test_epoll_ctl_epfd_equal_fd() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
let array_ptr = std::ptr::without_provenance_mut::<libc::epoll_event>(0x100);
let res = unsafe { libc::epoll_ctl(epfd, libc::EPOLL_CTL_ADD, epfd, array_ptr) };
@ -699,8 +640,7 @@ fn test_epoll_ctl_notification() {
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
// Register one side of the socketpair with epoll.
let mut ev = libc::epoll_event { events: EPOLL_IN_OUT_ET, u64: fds[0] as u64 };
@ -736,11 +676,10 @@ fn test_epoll_ctl_notification() {
fn test_issue_3858() {
// Create an eventfd instance.
let flags = libc::EFD_NONBLOCK | libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
let fd = errno_result(unsafe { libc::eventfd(0, flags) }).unwrap();
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
let epfd = errno_result(unsafe { libc::epoll_create1(0) }).unwrap();
// Register eventfd with EPOLLIN | EPOLLET.
let mut ev = libc::epoll_event {
@ -755,8 +694,7 @@ fn test_issue_3858() {
assert_ne!(newfd, -1);
// Close the old epoll instance, so the new FD is now the only FD.
let res = unsafe { libc::close(epfd) };
assert_eq!(res, 0);
errno_check(unsafe { libc::close(epfd) });
// Write to the eventfd instance.
let sized_8_data: [u8; 8] = 1_u64.to_ne_bytes();
@ -772,8 +710,7 @@ fn test_issue_4374() {
// Create a socketpair instance, make it non-blocking.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
assert_eq!(unsafe { libc::fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK) }, 0);
assert_eq!(unsafe { libc::fcntl(fds[1], libc::F_SETFL, libc::O_NONBLOCK) }, 0);
@ -805,8 +742,7 @@ fn test_issue_4374_reads() {
// Create a socketpair instance, make it non-blocking.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
errno_check(unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) });
assert_eq!(unsafe { libc::fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK) }, 0);
assert_eq!(unsafe { libc::fcntl(fds[1], libc::F_SETFL, libc::O_NONBLOCK) }, 0);

View file

@ -3,11 +3,13 @@
//@compile-flags: -Zmiri-disable-isolation
use std::fs::File;
use std::io::Error;
use std::os::fd::AsRawFd;
#[path = "../../utils/libc.rs"]
mod libc_utils;
#[path = "../../utils/mod.rs"]
mod utils;
use libc_utils::*;
fn main() {
let bytes = b"Hello, World!\n";
@ -17,57 +19,44 @@ fn main() {
// Test that we can apply many shared locks
for file in files.iter() {
let fd = file.as_raw_fd();
let ret = unsafe { libc::flock(fd, libc::LOCK_SH) };
if ret != 0 {
panic!("flock error: {}", Error::last_os_error());
}
errno_check(unsafe { libc::flock(file.as_raw_fd(), libc::LOCK_SH) });
}
// Test that shared lock prevents exclusive lock
{
let fd = files[0].as_raw_fd();
let ret = unsafe { libc::flock(fd, libc::LOCK_EX | libc::LOCK_NB) };
assert_eq!(ret, -1);
let err = Error::last_os_error().raw_os_error().unwrap();
assert_eq!(err, libc::EWOULDBLOCK);
let err =
errno_result(unsafe { libc::flock(fd, libc::LOCK_EX | libc::LOCK_NB) }).unwrap_err();
assert_eq!(err.raw_os_error().unwrap(), libc::EWOULDBLOCK);
}
// Unlock shared lock
for file in files.iter() {
let fd = file.as_raw_fd();
let ret = unsafe { libc::flock(fd, libc::LOCK_UN) };
if ret != 0 {
panic!("flock error: {}", Error::last_os_error());
}
errno_check(unsafe { libc::flock(file.as_raw_fd(), libc::LOCK_UN) });
}
// Take exclusive lock
{
let fd = files[0].as_raw_fd();
let ret = unsafe { libc::flock(fd, libc::LOCK_EX) };
assert_eq!(ret, 0);
errno_check(unsafe { libc::flock(fd, libc::LOCK_EX) });
}
// Test that shared lock prevents exclusive and shared locks
{
let fd = files[1].as_raw_fd();
let ret = unsafe { libc::flock(fd, libc::LOCK_EX | libc::LOCK_NB) };
assert_eq!(ret, -1);
let err = Error::last_os_error().raw_os_error().unwrap();
assert_eq!(err, libc::EWOULDBLOCK);
let err =
errno_result(unsafe { libc::flock(fd, libc::LOCK_EX | libc::LOCK_NB) }).unwrap_err();
assert_eq!(err.raw_os_error().unwrap(), libc::EWOULDBLOCK);
let fd = files[2].as_raw_fd();
let ret = unsafe { libc::flock(fd, libc::LOCK_SH | libc::LOCK_NB) };
assert_eq!(ret, -1);
let err = Error::last_os_error().raw_os_error().unwrap();
assert_eq!(err, libc::EWOULDBLOCK);
let err =
errno_result(unsafe { libc::flock(fd, libc::LOCK_SH | libc::LOCK_NB) }).unwrap_err();
assert_eq!(err.raw_os_error().unwrap(), libc::EWOULDBLOCK);
}
// Unlock exclusive lock
{
let fd = files[0].as_raw_fd();
let ret = unsafe { libc::flock(fd, libc::LOCK_UN) };
assert_eq!(ret, 0);
errno_check(unsafe { libc::flock(fd, libc::LOCK_UN) });
}
}

View file

@ -36,6 +36,10 @@ fn main() {
test_posix_realpath_errors();
#[cfg(target_os = "linux")]
test_posix_fadvise();
#[cfg(not(target_os = "macos"))]
test_posix_fallocate::<libc::off_t>(libc::posix_fallocate);
#[cfg(any(target_os = "linux", target_os = "android"))]
test_posix_fallocate::<libc::off64_t>(libc::posix_fallocate64);
#[cfg(target_os = "linux")]
test_sync_file_range();
test_isatty();
@ -335,6 +339,74 @@ fn test_posix_fadvise() {
assert_eq!(result, 0);
}
#[cfg(not(target_os = "macos"))]
/// Test `posix_fallocate`-style functions. Generic over the offset type `T` so
/// the same checks can run against both `posix_fallocate` (`off_t`) and
/// `posix_fallocate64` (`off64_t`); note these functions report errors via the
/// return value, not via errno.
fn test_posix_fallocate<T: From<i32>>(
    posix_fallocate: unsafe extern "C" fn(fd: libc::c_int, offset: T, len: T) -> libc::c_int,
) {
    // libc::off_t is i32 in target i686-unknown-linux-gnu
    // https://docs.rs/libc/latest/i686-unknown-linux-gnu/libc/type.off_t.html
    let test_errors = || {
        // An invalid fd is reported as EBADF (in the return value).
        let ret = unsafe { posix_fallocate(42, T::from(0), T::from(10)) };
        assert_eq!(ret, libc::EBADF);

        let path = utils::prepare("miri_test_libc_posix_fallocate_errors.txt");
        let file = File::create(&path).unwrap();

        // A negative offset is invalid.
        let ret = unsafe { posix_fallocate(file.as_raw_fd(), T::from(-10), T::from(10)) };
        assert_eq!(ret, libc::EINVAL);
        // A negative length is invalid.
        let ret = unsafe { posix_fallocate(file.as_raw_fd(), T::from(0), T::from(-10)) };
        assert_eq!(ret, libc::EINVAL);

        // An fd that is not open for writing cannot be allocated into.
        let c_path = CString::new(path.as_os_str().as_bytes()).expect("CString::new failed");
        let fd = unsafe { libc::open(c_path.as_ptr(), libc::O_RDONLY) };
        assert_ne!(fd, -1);
        let ret = unsafe { posix_fallocate(fd, T::from(0), T::from(10)) };
        assert_eq!(ret, libc::EBADF);
        // Clean up the raw fd and the temporary file (they were leaked before).
        assert_eq!(unsafe { libc::close(fd) }, 0);
        remove_file(&path).unwrap();
    };
    let test = || {
        let bytes = b"hello";
        let path = utils::prepare("miri_test_libc_posix_fallocate.txt");
        let mut file = File::create(&path).unwrap();
        file.write_all(bytes).unwrap();
        file.sync_all().unwrap();
        assert_eq!(file.metadata().unwrap().len(), 5);

        let c_path = CString::new(path.as_os_str().as_bytes()).expect("CString::new failed");
        let fd = unsafe { libc::open(c_path.as_ptr(), libc::O_RDWR) };
        assert_ne!(fd, -1);

        // Allocate to a bigger size from offset 0: the file grows.
        let res = unsafe { posix_fallocate(fd, T::from(0), T::from(10)) };
        assert_eq!(res, 0);
        assert_eq!(file.metadata().unwrap().len(), 10);

        // Write after allocation; `write_all` rather than `write` so a partial
        // write cannot silently pass. The size must be unchanged.
        file.write_all(b"dup").unwrap();
        file.sync_all().unwrap();
        assert_eq!(file.metadata().unwrap().len(), 10);

        // Can't truncate to a smaller size with posix_fallocate: size stays 10.
        let res = unsafe { posix_fallocate(fd, T::from(0), T::from(3)) };
        assert_eq!(res, 0);
        assert_eq!(file.metadata().unwrap().len(), 10);

        // Allocate from a nonzero offset: size grows to offset + len = 14.
        let res = unsafe { posix_fallocate(fd, T::from(7), T::from(7)) };
        assert_eq!(res, 0);
        assert_eq!(file.metadata().unwrap().len(), 14);

        // Close the raw fd (it was leaked before) and remove the file.
        assert_eq!(unsafe { libc::close(fd) }, 0);
        remove_file(&path).unwrap();
    };
    test_errors();
    test();
}
#[cfg(target_os = "linux")]
fn test_sync_file_range() {
use std::io::Write;

View file

@ -55,6 +55,94 @@ unsafe fn test_avx512() {
assert_eq_m512i(r, e);
}
test_mm512_sad_epu8();
#[target_feature(enable = "avx512bw")]
unsafe fn test_mm512_maddubs_epi16() {
    // Multiplies each unsigned byte of `a` with the corresponding signed byte
    // of `b`, then adds adjacent products into 16-bit lanes with *signed
    // saturation* (exercised by the -32768 and 32767 expected results below).
    // `a` is interpreted as `u8x64`, but `_mm512_set_epi8` expects `i8`, so we have to cast.
    #[rustfmt::skip]
    let a = _mm512_set_epi8(
        255u8 as i8, 255u8 as i8, 60, 50, 100, 100, 255u8 as i8, 200u8 as i8,
        255u8 as i8, 200u8 as i8, 200u8 as i8, 100, 60, 50, 20, 10,
        255u8 as i8, 255u8 as i8, 60, 50, 100, 100, 255u8 as i8, 200u8 as i8,
        255u8 as i8, 200u8 as i8, 200u8 as i8, 100, 60, 50, 20, 10,
        255u8 as i8, 255u8 as i8, 60, 50, 100, 100, 255u8 as i8, 200u8 as i8,
        255u8 as i8, 200u8 as i8, 200u8 as i8, 100, 60, 50, 20, 10,
        255u8 as i8, 255u8 as i8, 60, 50, 100, 100, 255u8 as i8, 200u8 as i8,
        255u8 as i8, 200u8 as i8, 200u8 as i8, 100, 60, 50, 20, 10,
    );
    let b = _mm512_set_epi8(
        64, 64, -2, 1, 100, 100, -128, -128, //
        127, 127, -1, 1, 2, 2, 1, 1, //
        64, 64, -2, 1, 100, 100, -128, -128, //
        127, 127, -1, 1, 2, 2, 1, 1, //
        64, 64, -2, 1, 100, 100, -128, -128, //
        127, 127, -1, 1, 2, 2, 1, 1, //
        64, 64, -2, 1, 100, 100, -128, -128, //
        127, 127, -1, 1, 2, 2, 1, 1, //
    );
    let r = _mm512_maddubs_epi16(a, b);
    // The same 16-byte input pattern repeats four times across the 512-bit
    // vector, so the 8 expected i16 results repeat four times as well.
    let e = _mm512_set_epi16(
        32640, -70, 20000, -32768, 32767, -100, 220, 30, //
        32640, -70, 20000, -32768, 32767, -100, 220, 30, //
        32640, -70, 20000, -32768, 32767, -100, 220, 30, //
        32640, -70, 20000, -32768, 32767, -100, 220, 30, //
    );
    assert_eq_m512i(r, e);
}
test_mm512_maddubs_epi16();
#[target_feature(enable = "avx512f")]
unsafe fn test_mm512_permutexvar_epi32() {
    // Permutes the 16 32-bit lanes of `a` according to the per-lane indices
    // in the first argument. Lane i of `a` holds the value i, so the result
    // directly shows which lane each index selected.
    let a = _mm512_set_epi32(
        15, 14, 13, 12, //
        11, 10, 9, 8, //
        7, 6, 5, 4, //
        3, 2, 1, 0, //
    );
    // The identity permutation must return `a` unchanged.
    let idx_identity = _mm512_set_epi32(
        15, 14, 13, 12, //
        11, 10, 9, 8, //
        7, 6, 5, 4, //
        3, 2, 1, 0, //
    );
    let r_id = _mm512_permutexvar_epi32(idx_identity, a);
    assert_eq_m512i(r_id, a);
    // Test some out-of-bounds indices.
    // The expected values below are consistent with only the low 4 bits of
    // each index being used: e.g. -1 selects lane 15, and 16/128/i32::MIN
    // all select lane 0.
    let edge_cases = _mm512_set_epi32(
        0,
        -1,
        -128,
        i32::MIN,
        15,
        16,
        128,
        i32::MAX,
        0,
        -1,
        -128,
        i32::MIN,
        15,
        16,
        128,
        i32::MAX,
    );
    let r = _mm512_permutexvar_epi32(edge_cases, a);
    let e = _mm512_set_epi32(0, 15, 0, 0, 15, 0, 0, 15, 0, 15, 0, 0, 15, 0, 0, 15);
    assert_eq_m512i(r, e);
}
test_mm512_permutexvar_epi32();
}
// Some of the constants in the tests below are just bit patterns. They should not

View file

@ -0,0 +1,8 @@
//! Ensure that the target features given in the target spec are actually enabled.
//@only-target: armv7
fn main() {
    // These features are declared in the armv7 target spec; they must show up
    // as enabled `target_feature` cfgs at compile time.
    assert!(cfg!(target_feature = "v7"));
    assert!(cfg!(target_feature = "vfp2"));
    assert!(cfg!(target_feature = "thumb2"));
}

View file

@ -338,8 +338,8 @@ fn main() -> Result<()> {
}
// We only enable GenMC tests when the `genmc` feature is enabled, but also only on platforms we support:
// FIXME(genmc,macos): Add `target_os = "macos"` once `https://github.com/dtolnay/cxx/issues/1535` is fixed.
// FIXME(genmc,cross-platform): remove `host == target` check once cross-platform support with GenMC is possible.
// FIXME(genmc,cross-platform): Technically we do support cross-target execution as long as the
// target is also 64bit little-endian, so `host == target` is too strict.
if cfg!(all(
feature = "genmc",
target_os = "linux",

View file

@ -1,6 +1,24 @@
//! Utils that need libc.
#![allow(dead_code)]
use std::{fmt, io};
/// Interpret the return value of a libc call that signals failure with `-1`:
/// `-1` becomes `Err` carrying the current errno, anything above `-1` is
/// passed through as `Ok`. Values below `-1` never occur for such calls,
/// so they abort the test.
#[track_caller]
pub fn errno_result<T: From<i8> + Ord>(ret: T) -> io::Result<T> {
    let sentinel: T = (-1i8).into();
    if ret == sentinel {
        Err(io::Error::last_os_error())
    } else if ret > sentinel {
        Ok(ret)
    } else {
        panic!("unexpected return value: less than -1")
    }
}

/// Assert that a libc call with errno-style error handling returned exactly `0`.
#[track_caller]
pub fn errno_check<T: From<i8> + Ord + fmt::Debug>(ret: T) {
    let value = errno_result(ret).unwrap();
    assert_eq!(value, 0i8.into(), "wrong successful result");
}
pub unsafe fn read_all(
fd: libc::c_int,
buf: *mut libc::c_void,
@ -22,6 +40,7 @@ pub unsafe fn read_all(
return read_so_far as libc::ssize_t;
}
/// Read exactly `N` bytes from `fd`. Error if that many bytes could not be read.
#[track_caller]
pub fn read_all_into_array<const N: usize>(fd: libc::c_int) -> Result<[u8; N], libc::ssize_t> {
let mut buf = [0; N];
@ -52,6 +71,7 @@ pub unsafe fn write_all(
return written_so_far as libc::ssize_t;
}
/// Write the entire `buf` to `fd`. Error if not all bytes could be written.
#[track_caller]
pub fn write_all_from_slice(fd: libc::c_int, buf: &[u8]) -> Result<(), libc::ssize_t> {
let res = unsafe { write_all(fd, buf.as_ptr().cast(), buf.len()) };
@ -62,3 +82,55 @@ pub fn write_all_from_slice(fd: libc::c_int, buf: &[u8]) -> Result<(), libc::ssi
Err(res)
}
}
#[cfg(any(target_os = "linux", target_os = "android", target_os = "illumos"))]
#[allow(unused_imports)]
pub mod epoll {
use libc::c_int;
pub use libc::{EPOLL_CTL_ADD, EPOLL_CTL_DEL, EPOLL_CTL_MOD};
// Re-export some constants we need a lot for this.
pub use libc::{EPOLLET, EPOLLHUP, EPOLLIN, EPOLLOUT, EPOLLRDHUP};
use super::*;
/// The libc epoll_event type doesn't fit to the EPOLLIN etc constants, so we have our
/// own type. We also make the data field an int since we typically want to store FDs there.
#[derive(PartialEq, Debug)]
pub struct Ev {
    /// Event mask: a combination of the `EPOLL*` flags (which are `c_int` in libc).
    pub events: c_int,
    /// User data attached to the event; typically the file descriptor it refers to.
    pub data: c_int,
}
/// Wrapper around `libc::epoll_ctl` that takes our [`Ev`] type and reports
/// failure as an `io::Result` instead of via errno.
#[track_caller]
pub fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: Ev) -> io::Result<()> {
    let Ev { events, data } = event;
    // Translate to the raw libc representation.
    let mut raw_event = libc::epoll_event {
        events: events.cast_unsigned(),
        u64: data.try_into().unwrap(),
    };
    let ret = errno_result(unsafe { libc::epoll_ctl(epfd, op, fd, &raw mut raw_event) })?;
    // On success, epoll_ctl always returns 0.
    assert_eq!(ret, 0);
    Ok(())
}

/// Helper for the common case of adding an FD to an epoll with the FD itself being
/// the `data`.
#[track_caller]
pub fn epoll_ctl_add(epfd: c_int, fd: c_int, events: c_int) -> io::Result<()> {
    epoll_ctl(epfd, EPOLL_CTL_ADD, fd, Ev { data: fd, events })
}
#[track_caller]
pub fn check_epoll_wait_noblock<const N: usize>(epfd: i32, expected: &[Ev]) {
let mut array: [libc::epoll_event; N] = [libc::epoll_event { events: 0, u64: 0 }; N];
let num = errno_result(unsafe {
libc::epoll_wait(epfd, array.as_mut_ptr(), N.try_into().unwrap(), 0)
})
.expect("epoll_wait returned an error");
let got = &mut array[..num.try_into().unwrap()];
let got = got
.iter()
.map(|e| Ev { events: e.events.cast_signed(), data: e.u64.try_into().unwrap() })
.collect::<Vec<_>>();
assert_eq!(got, expected, "got wrong notifications");
}
}