Merge remote-tracking branch 'libm/merge-into-builtins-prep' into absorb-libm

Absorb the libm repository into `compiler-builtins`.

This was done using `git-filter-repo` to ensure hashes mentioned in
commit messages were correctly rewritten; I used the same strategy to
merge `ctest` into `libc` [1] and it worked quite well. Approximately:

    # `git filter-repo` requires a clean clone
    git clone https://github.com/rust-lang/libm.git

    # Move all code to a `libm` subdirectory for all history
    git filter-repo --to-subdirectory-filter libm

    # The default merge messages are "merge pull request #nnn from
    # user/branch". GH links these incorrectly in the new repo, so
    # rewrite messages from `#nnn` to `rust-lang/libm#nnn`.
    echo 'regex:(^|\s)(#\d+)==>\1rust-lang/libm\2' > messages.txt
    git filter-repo --replace-message messages.txt

    # Re-add a remote and push as a new branch
    git remote add upstream https://github.com/rust-lang/libm.git
    git switch -c merge-into-builtins-prep
    git push --set-upstream upstream merge-into-builtins-prep

    # Now, in a compiler-builtins checkout, add `libm` as a remote
    git remote add libm https://github.com/rust-lang/libm.git
    git fetch libm

    # Do the merge that creates this commit
    git merge libm/merge-into-builtins-prep --allow-unrelated-histories

The result should be correct git history and blame for all files, with
messages that use correct rewritten hashes when they are referenced.
There is some reorganization and CI work needed, but that will be a
follow up.

After this merges I will need to push tags from `libm`, which I have
already rewritten to include a `libm-` prefix. Old tags in
compiler-builtins should likely also be rewritten to add a prefix (we
already have this for newer tags), but this can be done at any point.

* Original remote: https://github.com/rust-lang/libm.git
* Default HEAD: c94017af75c3ec4616d5b7f9b6b1b3826b934469 ("Migrate all
  crates except `libm` to edition 2024")
* HEAD after rewriting history: 15fb6307f6dc295fb965d1c4f486571cc18ab6b3
  ("Migrate all crates except `libm` to edition 2024")

[1]: https://github.com/rust-lang/libc/pull/4283#issuecomment-2773986492
This commit is contained in:
Trevor Gross 2025-04-19 20:17:07 +00:00
commit e646c73c28
261 changed files with 31149 additions and 0 deletions

View file

@ -0,0 +1,21 @@
# EditorConfig helps developers define and maintain consistent
# coding styles between different editors and IDEs
# editorconfig.org
root = true
[*]
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4
[*.md]
# double whitespace at end of line
# denotes a line break in Markdown
trim_trailing_whitespace = false
[*.yml]
indent_size = 2

View file

@ -0,0 +1,5 @@
# Use `git config blame.ignorerevsfile .git-blame-ignore-revs` to make
# `git blame` ignore the following commits.
# Reformat with a new `.rustfmt.toml`
5882cabb83c30bf7c36023f9a55a80583636b0e8

View file

@ -0,0 +1,324 @@
name: CI
on:
push:
branches:
- master
pull_request:
concurrency:
# Make sure that new pushes cancel running jobs
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env:
CARGO_TERM_COLOR: always
RUSTDOCFLAGS: -Dwarnings
RUSTFLAGS: -Dwarnings
RUST_BACKTRACE: full
BENCHMARK_RUSTC: nightly-2025-01-16 # Pin the toolchain for reproducible results
jobs:
test:
name: Build and test
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
include:
- target: aarch64-apple-darwin
os: macos-15
# FIXME: pinned due to https://github.com/llvm/llvm-project/issues/127804
channel: nightly-2025-02-07
- target: aarch64-unknown-linux-gnu
os: ubuntu-24.04-arm
# FIXME: pinned due to https://github.com/llvm/llvm-project/issues/127804
channel: nightly-2025-02-07
- target: aarch64-pc-windows-msvc
os: windows-2025
build_only: 1 # Can't run on x86 hosts
- target: arm-unknown-linux-gnueabi
os: ubuntu-24.04
- target: arm-unknown-linux-gnueabihf
os: ubuntu-24.04
- target: armv7-unknown-linux-gnueabihf
os: ubuntu-24.04
- target: i586-unknown-linux-gnu
os: ubuntu-24.04
- target: i686-unknown-linux-gnu
os: ubuntu-24.04
- target: loongarch64-unknown-linux-gnu
os: ubuntu-24.04
- target: powerpc-unknown-linux-gnu
os: ubuntu-24.04
- target: powerpc64-unknown-linux-gnu
os: ubuntu-24.04
- target: powerpc64le-unknown-linux-gnu
os: ubuntu-24.04
- target: riscv64gc-unknown-linux-gnu
os: ubuntu-24.04
- target: thumbv6m-none-eabi
os: ubuntu-24.04
- target: thumbv7em-none-eabi
os: ubuntu-24.04
- target: thumbv7em-none-eabihf
os: ubuntu-24.04
- target: thumbv7m-none-eabi
os: ubuntu-24.04
- target: x86_64-unknown-linux-gnu
os: ubuntu-24.04
- target: x86_64-apple-darwin
os: macos-13
- target: wasm32-unknown-unknown
os: ubuntu-24.04
build_only: 1
- target: i686-pc-windows-msvc
os: windows-2025
- target: x86_64-pc-windows-msvc
os: windows-2025
- target: i686-pc-windows-gnu
os: windows-2025
# FIXME: pinned due to https://github.com/rust-lang/rust/issues/136795
channel: nightly-2025-02-07-i686-gnu
- target: x86_64-pc-windows-gnu
os: windows-2025
channel: nightly-x86_64-gnu
runs-on: ${{ matrix.os }}
env:
BUILD_ONLY: ${{ matrix.build_only }}
steps:
- name: Print runner information
run: uname -a
- uses: actions/checkout@v4
with:
submodules: true
- name: Install Rust (rustup)
shell: bash
run: |
channel="nightly"
# Account for channels that have required components (MinGW)
[ -n "${{ matrix.channel }}" ] && channel="${{ matrix.channel }}"
rustup update "$channel" --no-self-update
rustup default "$channel"
rustup target add "${{ matrix.target }}"
rustup component add clippy llvm-tools-preview
- uses: taiki-e/install-action@nextest
- uses: Swatinem/rust-cache@v2
with:
key: ${{ matrix.target }}
- name: Verify API list
if: matrix.os == 'ubuntu-24.04'
run: python3 etc/update-api-list.py --check
# Non-linux tests just use our raw script
- name: Run locally
if: matrix.os != 'ubuntu-24.04' || contains(matrix.target, 'wasm')
shell: bash
run: ./ci/run.sh ${{ matrix.target }}
# Otherwise we use our docker containers to run builds
- name: Run in Docker
if: matrix.os == 'ubuntu-24.04' && !contains(matrix.target, 'wasm')
run: |
rustup target add x86_64-unknown-linux-musl
cargo generate-lockfile && ./ci/run-docker.sh ${{ matrix.target }}
- name: Print test logs if available
if: always()
run: if [ -f "target/test-log.txt" ]; then cat target/test-log.txt; fi
shell: bash
clippy:
name: Clippy
runs-on: ubuntu-24.04
timeout-minutes: 10
steps:
- uses: actions/checkout@master
with:
submodules: true
- name: Install Rust
run: |
rustup update nightly --no-self-update
rustup default nightly
rustup component add clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy --all --all-features --all-targets
builtins:
name: Check use with compiler-builtins
runs-on: ubuntu-24.04
timeout-minutes: 10
steps:
- uses: actions/checkout@master
- name: Install Rust
run: rustup update nightly --no-self-update && rustup default nightly
- uses: Swatinem/rust-cache@v2
- run: cargo check --manifest-path crates/compiler-builtins-smoke-test/Cargo.toml
- run: cargo test --manifest-path crates/compiler-builtins-smoke-test/Cargo.toml
benchmarks:
name: Benchmarks
runs-on: ubuntu-24.04
timeout-minutes: 20
steps:
- uses: actions/checkout@master
with:
submodules: true
- uses: taiki-e/install-action@cargo-binstall
- name: Set up dependencies
run: |
sudo apt update
sudo apt install -y valgrind gdb libc6-dbg # Needed for iai-callgrind
rustup update "$BENCHMARK_RUSTC" --no-self-update
rustup default "$BENCHMARK_RUSTC"
# Install the version of iai-callgrind-runner that is specified in Cargo.toml
iai_version="$(cargo metadata --format-version=1 --features icount |
jq -r '.packages[] | select(.name == "iai-callgrind").version')"
cargo binstall -y iai-callgrind-runner --version "$iai_version"
sudo apt-get install valgrind
- uses: Swatinem/rust-cache@v2
- name: Run icount benchmarks
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
run: ./ci/bench-icount.sh
- name: Upload the benchmark baseline
uses: actions/upload-artifact@v4
with:
name: ${{ env.BASELINE_NAME }}
path: ${{ env.BASELINE_NAME }}.tar.xz
- name: Run wall time benchmarks
run: |
# Always use the same seed for benchmarks. Ideally we should switch to a
# non-random generator.
export LIBM_SEED=benchesbenchesbenchesbencheswoo!
cargo bench --all --features short-benchmarks,build-musl,force-soft-floats
- name: Print test logs if available
if: always()
run: if [ -f "target/test-log.txt" ]; then cat target/test-log.txt; fi
shell: bash
msrv:
name: Check MSRV
runs-on: ubuntu-24.04
timeout-minutes: 10
env:
RUSTFLAGS: # No need to check warnings on old MSRV, unset `-Dwarnings`
steps:
- uses: actions/checkout@master
- name: Install Rust
run: |
msrv="$(perl -ne 'print if s/rust-version\s*=\s*"(.*)"/\1/g' libm/Cargo.toml)"
echo "MSRV: $msrv"
rustup update "$msrv" --no-self-update && rustup default "$msrv"
- uses: Swatinem/rust-cache@v2
- run: |
# FIXME(msrv): Remove the workspace Cargo.toml so 1.63 cargo doesn't see
# `edition = "2024"` and get spooked.
rm Cargo.toml
cargo build --manifest-path libm/Cargo.toml
rustfmt:
name: Rustfmt
runs-on: ubuntu-24.04
timeout-minutes: 10
steps:
- uses: actions/checkout@master
- name: Install Rust
run: |
rustup update nightly --no-self-update
rustup default nightly
rustup component add rustfmt
- run: cargo fmt -- --check
# Determine which extensive tests should be run based on changed files.
calculate_extensive_matrix:
name: Calculate job matrix
runs-on: ubuntu-24.04
timeout-minutes: 10
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
outputs:
matrix: ${{ steps.script.outputs.matrix }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 100
- name: Fetch pull request ref
run: git fetch origin "$GITHUB_REF:$GITHUB_REF"
if: github.event_name == 'pull_request'
- run: python3 ci/ci-util.py generate-matrix >> "$GITHUB_OUTPUT"
id: script
extensive:
name: Extensive tests for ${{ matrix.ty }}
needs:
# Wait on `clippy` so we have some confidence that the crate will build
- clippy
- calculate_extensive_matrix
runs-on: ubuntu-24.04
timeout-minutes: 240 # 4 hours
strategy:
matrix:
# Use the output from `calculate_extensive_matrix` to calculate the matrix
# FIXME: it would be better to run all jobs (i.e. all types) but mark those that
# didn't change as skipped, rather than completely excluding the job. However,
# this is not currently possible https://github.com/actions/runner/issues/1985.
include: ${{ fromJSON(needs.calculate_extensive_matrix.outputs.matrix).matrix }}
env:
TO_TEST: ${{ matrix.to_test }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install Rust
run: |
rustup update nightly --no-self-update
rustup default nightly
- uses: Swatinem/rust-cache@v2
- name: Run extensive tests
run: |
echo "Tests to run: '$TO_TEST'"
if [ -z "$TO_TEST" ]; then
echo "No tests to run, exiting."
exit
fi
set -x
# Run the non-extensive tests first to catch any easy failures
cargo t --profile release-checked -- "$TO_TEST"
LIBM_EXTENSIVE_TESTS="$TO_TEST" cargo t \
--features build-mpfr,unstable,force-soft-floats \
--profile release-checked \
-- extensive
- name: Print test logs if available
run: if [ -f "target/test-log.txt" ]; then cat target/test-log.txt; fi
shell: bash
success:
needs:
- test
- builtins
- benchmarks
- msrv
- rustfmt
- extensive
runs-on: ubuntu-24.04
timeout-minutes: 10
# GitHub branch protection is exceedingly silly and treats "jobs skipped because a dependency
# failed" as success. So we have to do some contortions to ensure the job fails if any of its
# dependencies fails.
if: always() # make sure this is never "skipped"
steps:
# Manually check the status of all dependencies. `if: failure()` does not work.
- name: check if any dependency failed
run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}'

View file

@ -0,0 +1,27 @@
name: Release-plz
permissions:
pull-requests: write
contents: write
on:
push:
branches:
- master
jobs:
release-plz:
name: Release-plz
runs-on: ubuntu-24.04
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Rust (rustup)
run: rustup update nightly --no-self-update && rustup default nightly
- name: Run release-plz
uses: MarcoIeni/release-plz-action@v0.5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}

View file

@ -0,0 +1,11 @@
**.bk
.#*
/bin
/math/src
target
Cargo.lock
**.tar.gz
# Benchmark cache
iai-home
baseline-*

View file

@ -0,0 +1,4 @@
[submodule "musl"]
path = crates/musl-math-sys/musl
url = https://git.musl-libc.org/git/musl
shallow = true

View file

@ -0,0 +1,5 @@
# This matches rustc
style_edition = "2024"
use_small_heuristics = "Max"
group_imports = "StdExternalCrate"
imports_granularity = "Module"

View file

@ -0,0 +1,175 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.2.11](https://github.com/rust-lang/libm/compare/libm-v0.2.10...libm-v0.2.11) - 2024-10-28
### Fixed
- fix type of constants in ported sincosf ([#331](https://github.com/rust-lang/libm/pull/331))
### Other
- Disable a unit test that is failing on i586
- Add a procedural macro for expanding all function signatures
- Introduce `musl-math-sys` for bindings to musl math symbols
- Add basic docstrings to some functions ([#337](https://github.com/rust-lang/libm/pull/337))
## [0.2.10](https://github.com/rust-lang/libm/compare/libm-v0.2.9...libm-v0.2.10) - 2024-10-28
### Other
- Set the MSRV to 1.63 and test this in CI
## [0.2.9](https://github.com/rust-lang/libm/compare/libm-v0.2.8...libm-v0.2.9) - 2024-10-26
### Fixed
- Update exponent calculations in nextafter to match musl
### Changed
- Update licensing to MIT AND (MIT OR Apache-2.0), as this is derivative from
MIT-licensed musl.
- Set edition to 2021 for all crates
- Upgrade all dependencies
### Other
- Don't deny warnings in lib.rs
- Rename the `musl-bitwise-tests` feature to `test-musl-serialized`
- Rename the `musl-reference-tests` feature to `musl-bitwise-tests`
- Move `musl-reference-tests` to a new `libm-test` crate
- Add a `force-soft-floats` feature to prevent using any intrinsics or
arch-specific code
- Deny warnings in CI
- Fix `clippy::deprecated_cfg_attr` on compiler_builtins
- Corrected English typos
- Remove unneeded `extern core` in `tgamma`
- Allow internal_features lint when building with "unstable"
## [v0.2.1] - 2019-11-22
### Fixed
- sincosf
## [v0.2.0] - 2019-10-18
### Added
- Benchmarks
- signum
- remainder
- remainderf
- nextafter
- nextafterf
### Fixed
- Rounding to negative zero
- Overflows in rem_pio2 and remquo
- Overflows in fma
- sincosf
### Removed
- F32Ext and F64Ext traits
## [v0.1.4] - 2019-06-12
### Fixed
- Restored compatibility with Rust 1.31.0
## [v0.1.3] - 2019-05-14
### Added
- minf
- fmin
- fmaxf
- fmax
## [v0.1.2] - 2018-07-18
### Added
- acosf
- asin
- asinf
- atan
- atan2
- atan2f
- atanf
- cos
- cosf
- cosh
- coshf
- exp2
- expm1
- expm1f
- expo2
- fmaf
- pow
- sin
- sinf
- sinh
- sinhf
- tan
- tanf
- tanh
- tanhf
## [v0.1.1] - 2018-07-14
### Added
- acos
- acosf
- asin
- asinf
- atanf
- cbrt
- cbrtf
- ceil
- ceilf
- cosf
- exp
- exp2
- exp2f
- expm1
- expm1f
- fdim
- fdimf
- floorf
- fma
- fmod
- log
- log2
- log10
- log10f
- log1p
- log1pf
- log2f
- roundf
- sinf
- tanf
## v0.1.0 - 2018-07-13
- Initial release
[Unreleased]: https://github.com/japaric/libm/compare/v0.2.1...HEAD
[v0.2.1]: https://github.com/japaric/libm/compare/0.2.0...v0.2.1
[v0.2.0]: https://github.com/japaric/libm/compare/0.1.4...v0.2.0
[v0.1.4]: https://github.com/japaric/libm/compare/0.1.3...v0.1.4
[v0.1.3]: https://github.com/japaric/libm/compare/v0.1.2...0.1.3
[v0.1.2]: https://github.com/japaric/libm/compare/v0.1.1...v0.1.2
[v0.1.1]: https://github.com/japaric/libm/compare/v0.1.0...v0.1.1

View file

@ -0,0 +1,82 @@
# How to contribute
- Pick your favorite math function from the [issue tracker].
- Look for the C implementation of the function in the [MUSL source code][src].
- Copy paste the C code into a Rust file in the `src/math` directory and adjust
`src/math/mod.rs` accordingly. Also, uncomment the corresponding trait method
in `src/lib.rs`.
- Write some simple tests in your module (using `#[test]`)
- Run `cargo test` to make sure it works. Full tests are only run when enabling
features, see [Testing](#testing) below.
- Send us a pull request! Make sure to run `cargo fmt` on your code before
sending the PR. Also include "closes #42" in the PR description to close the
corresponding issue.
- :tada:
[issue tracker]: https://github.com/rust-lang/libm/issues
[src]: https://git.musl-libc.org/cgit/musl/tree/src/math
[`src/math/truncf.rs`]: https://github.com/rust-lang/libm/blob/master/src/math/truncf.rs
Check [PR #65] for an example.
[PR #65]: https://github.com/rust-lang/libm/pull/65
## Tips and tricks
- *IMPORTANT* The code in this crate will end up being used in the `core` crate so it can **not**
have any external dependencies (other than `core` itself).
- Only use relative imports within the `math` directory / module, e.g. `use self::fabs::fabs` or
`use super::k_cos`. Absolute imports from core are OK, e.g. `use core::u64`.
- To reinterpret a float as an integer use the `to_bits` method. The MUSL code uses the
`GET_FLOAT_WORD` macro, or a union, to do this operation.
- To reinterpret an integer as a float use the `f32::from_bits` constructor. The MUSL code uses the
`SET_FLOAT_WORD` macro, or a union, to do this operation.
- You may use other methods from core like `f64::is_nan`, etc. as appropriate.
- If you're implementing one of the private double-underscore functions, take a look at the
"source" name in the comment at the top for an idea for alternate naming. For example, `__sin`
was renamed to `k_sin` after the FreeBSD source code naming. Do `use` these private functions in
`mod.rs`.
- You may encounter weird literals like `0x1p127f` in the MUSL code. These are hexadecimal floating
point literals. Rust (the language) doesn't support these kind of literals. This crate provides
two macros, `hf32!` and `hf64!`, which convert string literals to floats at compile time.
```rust
assert_eq!(hf32!("0x1.ffep+8").to_bits(), 0x43fff000);
assert_eq!(hf64!("0x1.ffep+8").to_bits(), 0x407ffe0000000000);
```
- Rust code panics on arithmetic overflows when not optimized. You may need to use the [`Wrapping`]
newtype to avoid this problem, or individual methods like [`wrapping_add`].
[`Wrapping`]: https://doc.rust-lang.org/std/num/struct.Wrapping.html
[`wrapping_add`]: https://doc.rust-lang.org/std/primitive.u32.html#method.wrapping_add
## Testing
Normal tests can be executed with:
```sh
# Tests against musl require that the submodule is up to date.
git submodule init
git submodule update
# `--release` enables more test cases
cargo test --release
```
If you are on a system that cannot build musl or MPFR, passing
`--no-default-features` will run some limited tests.
The multiprecision tests use the [`rug`] crate for bindings to MPFR. MPFR can
be difficult to build on non-Unix systems, refer to [`gmp_mpfr_sys`] for help.
`build-musl` does not build with MSVC, Wasm, or Thumb.
[`rug`]: https://docs.rs/rug/latest/rug/
[`gmp_mpfr_sys`]: https://docs.rs/gmp-mpfr-sys/1.6.4/gmp_mpfr_sys/

View file

@ -0,0 +1,37 @@
[workspace]
resolver = "2"
members = [
"libm",
"crates/libm-macros",
"crates/libm-test",
"crates/musl-math-sys",
"crates/util",
]
default-members = [
"libm",
"crates/libm-macros",
"crates/libm-test"
]
exclude = [
# Requires `panic = abort` so can't be a member of the workspace
"crates/compiler-builtins-smoke-test",
]
# The default release profile is unchanged.
# Release mode with debug assertions
[profile.release-checked]
inherits = "release"
debug-assertions = true
overflow-checks = true
# Release with maximum optimizations, which is very slow to build. This is also
# what is needed to check `no-panic`.
[profile.release-opt]
inherits = "release"
codegen-units = 1
lto = "fat"
[profile.bench]
# Required for iai-callgrind
debug = true

View file

@ -0,0 +1,258 @@
rust-lang/libm as a whole is available for use under the MIT license:
------------------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
As a contributor, you agree that your code can be used under either the MIT
license or the Apache-2.0 license:
------------------------------------------------------------------------------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
This Rust library contains the following copyrights:
Copyright (c) 2018 Jorge Aparicio
Portions of this software are derived from third-party works licensed under
terms compatible with the above MIT license:
* musl libc https://www.musl-libc.org/. This library contains the following
copyright:
Copyright © 2005-2020 Rich Felker, et al.
* The CORE-MATH project https://core-math.gitlabpages.inria.fr/. CORE-MATH
routines are available under the MIT license on a per-file basis.
The musl libc COPYRIGHT file also includes the following notice relevant to
math portions of the library:
------------------------------------------------------------------------------
Much of the math library code (src/math/* and src/complex/*) is
Copyright © 1993,2004 Sun Microsystems or
Copyright © 2003-2011 David Schultz or
Copyright © 2003-2009 Steven G. Kargl or
Copyright © 2003-2009 Bruce D. Evans or
Copyright © 2008 Stephen L. Moshier or
Copyright © 2017-2018 Arm Limited
and labelled as such in comments in the individual source files. All
have been licensed under extremely permissive terms.
------------------------------------------------------------------------------
Copyright notices are retained in src/* files where relevant.

View file

@ -0,0 +1,56 @@
# `libm`
A port of [MUSL]'s libm to Rust.
[MUSL]: https://musl.libc.org/
## Goals
The short term goal of this library is to [enable math support (e.g. `sin`, `atan2`) for the
`wasm32-unknown-unknown` target][wasm] (cf. [rust-lang/compiler-builtins][pr]). The longer
term goal is to enable [math support in the `core` crate][core].
[wasm]: https://github.com/rust-lang/libm/milestone/1
[pr]: https://github.com/rust-lang/compiler-builtins/pull/248
[core]: https://github.com/rust-lang/libm/milestone/2
## Already usable
This crate is [on crates.io] and can be used today in stable `#![no_std]` programs.
The API documentation can be found [here](https://docs.rs/libm).
[on crates.io]: https://crates.io/crates/libm
## Benchmark
[benchmark]: #benchmark
The benchmarks are located in `crates/libm-bench` and require a nightly Rust toolchain.
To run all benchmarks:
> cargo +nightly bench --all
## Contributing
Please check [CONTRIBUTING.md](CONTRIBUTING.md).
## Minimum Rust version policy
This crate supports rustc 1.63 and newer.
## License
Usage is licensed under the MIT license ([LICENSE-MIT](LICENSE-MIT) or
https://opensource.org/licenses/MIT).
### Contribution
Contributions are licensed under both the MIT license and the Apache License,
Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or
https://www.apache.org/licenses/LICENSE-2.0). Unless you explicitly state
otherwise, any contribution intentionally submitted for inclusion in the work
by you, as defined in the Apache-2.0 license, shall be dual licensed as
mentioned, without any additional terms or conditions.
See `LICENSE.txt` for full details.

View file

@ -0,0 +1,58 @@
#!/bin/bash
# Run instruction-count benchmarks with iai-callgrind, compare them against
# the most recent baseline from the default branch, and package a new baseline.
set -eux

iai_home="iai-home"

# Download the baseline from master
./ci/ci-util.py locate-baseline --download --extract

# Run benchmarks once
function run_icount_benchmarks() {
    cargo_args=(
        "--bench" "icount"
        "--no-default-features"
        "--features" "unstable,unstable-float,icount"
    )
    iai_args=(
        "--home" "$(pwd)/$iai_home"
        "--regression=ir=5.0"
        "--save-summary"
    )

    # Parse `cargo_arg0 cargo_arg1 -- iai_arg0 iai_arg1` syntax
    parsing_iai_args=0
    while [ "$#" -gt 0 ]; do
        if [ "$parsing_iai_args" == "1" ]; then
            iai_args+=("$1")
        elif [ "$1" == "--" ]; then
            parsing_iai_args=1
        else
            cargo_args+=("$1")
        fi
        shift
    done

    # Run iai-callgrind benchmarks
    cargo bench "${cargo_args[@]}" -- "${iai_args[@]}"

    # NB: iai-callgrind should exit on error but does not, so we inspect the summary
    # for errors. See https://github.com/iai-callgrind/iai-callgrind/issues/337
    if [ -n "${PR_NUMBER:-}" ]; then
        # If this is for a pull request, ignore regressions if specified.
        ./ci/ci-util.py check-regressions --home "$iai_home" --allow-pr-override "$PR_NUMBER"
    else
        ./ci/ci-util.py check-regressions --home "$iai_home" || true
    fi
}

# Run once with softfloats, once with arch instructions enabled
run_icount_benchmarks --features force-soft-floats -- --save-baseline=softfloat
run_icount_benchmarks -- --save-baseline=hardfloat

# Name and tar the new baseline
name="baseline-icount-$(date -u +'%Y%m%d%H%M')-${GITHUB_SHA:0:12}"
echo "BASELINE_NAME=$name" >>"$GITHUB_ENV"
tar cJf "$name.tar.xz" "$iai_home"

View file

@ -0,0 +1,416 @@
#!/usr/bin/env python3
"""Utilities for CI.
This dynamically prepares a list of routines that had a source file change based on
git history.
"""
import json
import os
import subprocess as sp
import sys
from dataclasses import dataclass
from glob import glob, iglob
from inspect import cleandoc
from os import getenv
from pathlib import Path
from typing import TypedDict, Self
USAGE = cleandoc(
"""
usage:
./ci/ci-util.py <COMMAND> [flags]
COMMAND:
generate-matrix
Calculate a matrix of which functions had source change, print that as
a JSON object.
locate-baseline [--download] [--extract]
Locate the most recent benchmark baseline available in CI and, if flags
specify, download and extract it. Never exits with nonzero status if
downloading fails.
Note that `--extract` will overwrite files in `iai-home`.
check-regressions [--home iai-home] [--allow-pr-override pr_number]
Check `iai-home` (or `iai-home` if unspecified) for `summary.json`
files and see if there are any regressions. This is used as a workaround
for `iai-callgrind` not exiting with error status; see
<https://github.com/iai-callgrind/iai-callgrind/issues/337>.
If `--allow-pr-override` is specified, the regression check will not exit
with failure if any line in the PR starts with `allow-regressions`.
"""
)
REPO_ROOT = Path(__file__).parent.parent
GIT = ["git", "-C", REPO_ROOT]
DEFAULT_BRANCH = "master"
WORKFLOW_NAME = "CI" # Workflow that generates the benchmark artifacts
ARTIFACT_GLOB = "baseline-icount*"
# Place this in a PR body to skip regression checks (must be at the start of a line).
REGRESSION_DIRECTIVE = "ci: allow-regressions"
# Place this in a PR body to skip extensive tests
SKIP_EXTENSIVE_DIRECTIVE = "ci: skip-extensive"
# Place this in a PR body to allow running a large number of extensive tests. If not
# set, this script will error out if a threshold is exceeded in order to avoid
# accidentally spending huge amounts of CI time.
ALLOW_MANY_EXTENSIVE_DIRECTIVE = "ci: allow-many-extensive"
MANY_EXTENSIVE_THRESHOLD = 20
# Don't run extensive tests if these files change, even if they contain a function
# definition.
IGNORE_FILES = [
"libm/src/math/support/",
"libm/src/libm_helper.rs",
"libm/src/math/arch/intrinsics.rs",
]
TYPES = ["f16", "f32", "f64", "f128"]
def eprint(*args, **kwargs):
    """Write to stderr; accepts the same arguments as the built-in ``print``."""
    print(*args, **kwargs, file=sys.stderr)
@dataclass
class PrInfo:
"""GitHub response for PR query"""
body: str
commits: list[str]
created_at: str
number: int
@classmethod
def load(cls, pr_number: int | str) -> Self:
"""For a given PR number, query the body and commit list"""
pr_info = sp.check_output(
[
"gh",
"pr",
"view",
str(pr_number),
"--json=number,commits,body,createdAt",
# Flatten the commit list to only hashes, change a key to snake naming
"--jq=.commits |= map(.oid) | .created_at = .createdAt | del(.createdAt)",
],
text=True,
)
eprint("PR info:", json.dumps(pr_info, indent=4))
return cls(**json.loads(pr_info))
def contains_directive(self, directive: str) -> bool:
"""Return true if the provided directive is on a line in the PR body"""
lines = self.body.splitlines()
return any(line.startswith(directive) for line in lines)
class FunctionDef(TypedDict):
    """Type for an entry in `function-definitions.json`"""

    # Paths of the source files that implement this routine.
    sources: list[str]
    # Float type of the routine; keyed against the `TYPES` list (e.g. "f32").
    type: str
@dataclass
class Context:
    """State for one CI run: the git ref, changed files, and routine metadata."""

    # `GITHUB_REF` if set (e.g. `refs/pull/1234/merge` for PR CI), else None.
    gh_ref: str | None
    # Files changed by the PR; empty when no diff is available.
    changed: list[Path]
    # Routine name -> metadata loaded from `etc/function-definitions.json`.
    defs: dict[str, FunctionDef]

    def __init__(self) -> None:
        self.gh_ref = getenv("GITHUB_REF")
        self.changed = []
        self._init_change_list()

        with open(REPO_ROOT.joinpath("etc/function-definitions.json")) as f:
            defs = json.load(f)

        # The JSON file carries a free-form `__comment` key; discard it.
        defs.pop("__comment", None)
        self.defs = defs

    def _init_change_list(self):
        """Create a list of files that have been changed. This uses GITHUB_REF if
        available, otherwise a diff between `HEAD` and `master`.
        """
        # For pull requests, GitHub creates a ref `refs/pull/1234/merge` (1234 being
        # the PR number), and sets this as `GITHUB_REF`.
        ref = self.gh_ref
        eprint(f"using ref `{ref}`")
        if ref is None or "merge" not in ref:
            # If the ref is not for `merge` then we are not in PR CI
            eprint("No diff available for ref")
            return

        # The ref is for a dummy merge commit. We can extract the merge base by
        # inspecting all parents (`^@`).
        merge_sha = sp.check_output(
            GIT + ["show-ref", "--hash", ref], text=True
        ).strip()
        merge_log = sp.check_output(GIT + ["log", "-1", merge_sha], text=True)
        eprint(f"Merge:\n{merge_log}\n")

        parents = (
            sp.check_output(GIT + ["rev-parse", f"{merge_sha}^@"], text=True)
            .strip()
            .splitlines()
        )
        assert len(parents) == 2, f"expected two-parent merge but got:\n{parents}"
        base = parents[0].strip()
        incoming = parents[1].strip()
        eprint(f"base: {base}, incoming: {incoming}")

        # Changed files are the diff between the merge's two parents.
        textlist = sp.check_output(
            GIT + ["diff", base, incoming, "--name-only"], text=True
        )
        self.changed = [Path(p) for p in textlist.splitlines()]

    @staticmethod
    def _ignore_file(fname: str) -> bool:
        # True if changes to `fname` should never trigger extensive tests.
        return any(fname.startswith(pfx) for pfx in IGNORE_FILES)

    def changed_routines(self) -> dict[str, list[str]]:
        """Create a list of routines for which one or more files have been updated,
        separated by type.
        """
        routines = set()
        for name, meta in self.defs.items():
            # Don't update if changes to the file should be ignored
            sources = (f for f in meta["sources"] if not self._ignore_file(f))

            # Select changed files
            changed = [f for f in sources if Path(f) in self.changed]
            if len(changed) > 0:
                eprint(f"changed files for {name}: {changed}")
                routines.add(name)

        # Group the affected routine names by their float type.
        ret: dict[str, list[str]] = {}
        for r in sorted(routines):
            ret.setdefault(self.defs[r]["type"], []).append(r)

        return ret

    def make_workflow_output(self) -> str:
        """Create a JSON object with a list of items for each type's changed files,
        if any did change, and the routines that were affected by the change.
        """
        pr_number = os.environ.get("PR_NUMBER")
        skip_tests = False
        error_on_many_tests = False

        # PR body directives can skip extensive tests or lift the test-count cap.
        if pr_number is not None and len(pr_number) > 0:
            pr = PrInfo.load(pr_number)
            skip_tests = pr.contains_directive(SKIP_EXTENSIVE_DIRECTIVE)
            error_on_many_tests = not pr.contains_directive(
                ALLOW_MANY_EXTENSIVE_DIRECTIVE
            )

            if skip_tests:
                eprint("Skipping all extensive tests")

        changed = self.changed_routines()
        ret = []
        total_to_test = 0

        for ty in TYPES:
            ty_changed = changed.get(ty, [])
            ty_to_test = [] if skip_tests else ty_changed
            total_to_test += len(ty_to_test)

            item = {
                "ty": ty,
                "changed": ",".join(ty_changed),
                "to_test": ",".join(ty_to_test),
            }

            ret.append(item)

        output = json.dumps({"matrix": ret}, separators=(",", ":"))
        eprint(f"output: {output}")
        eprint(f"total extensive tests: {total_to_test}")

        # Fail rather than silently burning large amounts of CI time.
        if error_on_many_tests and total_to_test > MANY_EXTENSIVE_THRESHOLD:
            eprint(
                f"More than {MANY_EXTENSIVE_THRESHOLD} tests would be run; add"
                f" `{ALLOW_MANY_EXTENSIVE_DIRECTIVE}` to the PR body if this is intentional"
            )
            exit(1)

        return output
def locate_baseline(flags: list[str]) -> None:
    """Find the most recent baseline from CI, download it if specified.

    This returns rather than erroring, even if the `gh` commands fail. This is to avoid
    erroring in CI if the baseline is unavailable (artifact time limit exceeded, first
    run on the branch, etc).
    """
    download = False
    extract = False

    # Consume `--download` / `--extract` in any order; anything else is a usage error.
    while len(flags) > 0:
        match flags[0]:
            case "--download":
                download = True
            case "--extract":
                extract = True
            case _:
                eprint(USAGE)
                exit(1)
        flags = flags[1:]

    if extract and not download:
        eprint("cannot extract without downloading")
        exit(1)

    try:
        # Locate the most recent job to complete with success on our branch
        latest_job = sp.check_output(
            [
                "gh",
                "run",
                "list",
                "--status=success",
                f"--branch={DEFAULT_BRANCH}",
                "--json=databaseId,url,headSha,conclusion,createdAt,"
                "status,workflowDatabaseId,workflowName",
                # Return the first array element matching our workflow name. NB: cannot
                # just use `--limit=1`, jq filtering happens after limiting. We also
                # cannot just use `--workflow` because GH gets confused from
                # different file names in history.
                f'--jq=[.[] | select(.workflowName == "{WORKFLOW_NAME}")][0]',
            ],
            text=True,
        )
    except sp.CalledProcessError as e:
        eprint(f"failed to run github command: {e}")
        return

    try:
        latest = json.loads(latest_job)
        eprint("latest job: ", json.dumps(latest, indent=4))
    except json.JSONDecodeError as e:
        eprint(f"failed to decode json '{latest_job}', {e}")
        return

    if not download:
        eprint("--download not specified, returning")
        return

    job_id = latest.get("databaseId")
    if job_id is None:
        eprint("skipping download step")
        return

    # Best-effort download; failures are tolerated (`check=False`).
    sp.run(
        ["gh", "run", "download", str(job_id), f"--pattern={ARTIFACT_GLOB}"],
        check=False,
    )

    if not extract:
        eprint("skipping extraction step")
        return

    # Find the baseline with the most recent timestamp. GH downloads the files to e.g.
    # `some-dirname/some-dirname.tar.xz`, so just glob the whole thing together.
    candidate_baselines = glob(f"{ARTIFACT_GLOB}/{ARTIFACT_GLOB}")
    if len(candidate_baselines) == 0:
        eprint("no possible baseline directories found")
        return

    # Names embed a `YYYYmmddHHMM` timestamp, so a reverse lexicographic sort
    # puts the newest baseline first.
    candidate_baselines.sort(reverse=True)
    baseline_archive = candidate_baselines[0]
    eprint(f"extracting {baseline_archive}")
    sp.run(["tar", "xJvf", baseline_archive], check=True)
    eprint("baseline extracted successfully")
def check_iai_regressions(args: list[str]):
    """Find regressions in iai summary.json files, exit with failure if any are
    found.
    """
    iai_home_str = "iai-home"
    pr_number = None

    # Consume `--home <dir>` and `--allow-pr-override <pr>` argument pairs.
    while args:
        if args[0] == "--home" and len(args) >= 2:
            iai_home_str = args[1]
            args = args[2:]
        elif args[0] == "--allow-pr-override" and len(args) >= 2:
            pr_number = args[1]
            args = args[2:]
        else:
            eprint(USAGE)
            exit(1)

    iai_home = Path(iai_home_str)

    found_summaries = False
    regressions: list[dict] = []
    for summary_path in iglob("**/summary.json", root_dir=iai_home, recursive=True):
        found_summaries = True
        with open(iai_home / summary_path, "r") as f:
            summary = json.load(f)

        run = summary["callgrind_summary"]["callgrind_run"]
        name_entry = {"name": f"{summary['function_name']}.{summary['id']}"}

        # Collect regressions from every segment plus the run total.
        summary_regs = []
        for segment in run["segments"]:
            summary_regs.extend(segment["regressions"])
        summary_regs.extend(run["total"]["regressions"])

        regressions.extend(name_entry | reg for reg in summary_regs)

    if not found_summaries:
        eprint(f"did not find any summary.json files within {iai_home}")
        exit(1)

    if len(regressions) == 0:
        eprint("No regressions found")
        return

    eprint("Found regressions:", json.dumps(regressions, indent=4))

    # A PR can opt out of failing on regressions via a body directive.
    if pr_number is not None:
        pr = PrInfo.load(pr_number)
        if pr.contains_directive(REGRESSION_DIRECTIVE):
            eprint("PR allows regressions, returning")
            return

    exit(1)
def main():
    """Dispatch to one of the CI subcommands based on `sys.argv`."""
    args = sys.argv[1:]
    if args == ["generate-matrix"]:
        ctx = Context()
        print(f"matrix={ctx.make_workflow_output()}")
    elif args[:1] == ["locate-baseline"]:
        locate_baseline(args[1:])
    elif args[:1] == ["check-regressions"]:
        check_iai_regressions(args[1:])
    elif args in (["--help"], ["-h"]):
        print(USAGE)
        exit()
    else:
        eprint(USAGE)
        exit(1)


if __name__ == "__main__":
    main()

View file

@ -0,0 +1,15 @@
# CI image for cross-testing aarch64-unknown-linux-gnu: GCC cross toolchain
# plus QEMU user-mode emulation to run the test binaries.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-aarch64-linux-gnu m4 make libc6-dev-arm64-cross \
    qemu-user-static

ENV TOOLCHAIN_PREFIX=aarch64-linux-gnu-
# Point Cargo/cc at the cross tools; run binaries under QEMU.
# NOTE(review): RUST_TEST_THREADS=1 presumably serializes tests for emulated
# runs — confirm intent.
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER=qemu-aarch64-static \
    AR_aarch64_unknown_linux_gnu="$TOOLCHAIN_PREFIX"ar \
    CC_aarch64_unknown_linux_gnu="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/aarch64-linux-gnu \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,14 @@
# CI image for cross-testing arm-unknown-linux-gnueabi (soft-float ARM) via
# the GCC cross toolchain and QEMU user-mode emulation.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-arm-linux-gnueabi libc6-dev-armel-cross qemu-user-static

ENV TOOLCHAIN_PREFIX=arm-linux-gnueabi-
ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABI_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABI_RUNNER=qemu-arm-static \
    AR_arm_unknown_linux_gnueabi="$TOOLCHAIN_PREFIX"ar \
    CC_arm_unknown_linux_gnueabi="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/arm-linux-gnueabi \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,14 @@
# CI image for cross-testing arm-unknown-linux-gnueabihf (hard-float ARM) via
# the GCC cross toolchain and QEMU user-mode emulation.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-arm-linux-gnueabihf libc6-dev-armhf-cross qemu-user-static

ENV TOOLCHAIN_PREFIX=arm-linux-gnueabihf-
ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_RUNNER=qemu-arm-static \
    AR_arm_unknown_linux_gnueabihf="$TOOLCHAIN_PREFIX"ar \
    CC_arm_unknown_linux_gnueabihf="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/arm-linux-gnueabihf \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,14 @@
# CI image for cross-testing armv7-unknown-linux-gnueabihf; shares the
# arm-linux-gnueabihf toolchain and QEMU user-mode emulation.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-arm-linux-gnueabihf libc6-dev-armhf-cross qemu-user-static

ENV TOOLCHAIN_PREFIX=arm-linux-gnueabihf-
ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER=qemu-arm-static \
    AR_armv7_unknown_linux_gnueabihf="$TOOLCHAIN_PREFIX"ar \
    CC_armv7_unknown_linux_gnueabihf="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/arm-linux-gnueabihf \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,5 @@
# CI image for a 32-bit x86 target; gcc-multilib supplies the 32-bit libraries
# and m4/make support building musl for musl-math-sys.
# NOTE(review): exact target (i586 vs i686) is not identifiable from this file
# alone — confirm from the directory name.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc-multilib m4 make libc6-dev ca-certificates

View file

@ -0,0 +1,5 @@
# CI image for a 32-bit x86 target; gcc-multilib supplies the 32-bit libraries
# and m4/make support building musl for musl-math-sys.
# NOTE(review): exact target (i586 vs i686) is not identifiable from this file
# alone — confirm from the directory name.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc-multilib m4 make libc6-dev ca-certificates

View file

@ -0,0 +1,13 @@
# CI image for cross-testing loongarch64-unknown-linux-gnu; uses the
# versioned gcc-14 cross compiler and QEMU user-mode emulation.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev qemu-user-static ca-certificates \
    gcc-14-loongarch64-linux-gnu libc6-dev-loong64-cross

ENV CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_LINKER=loongarch64-linux-gnu-gcc-14 \
    CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_RUNNER=qemu-loongarch64-static \
    AR_loongarch64_unknown_linux_gnu=loongarch64-linux-gnu-ar \
    CC_loongarch64_unknown_linux_gnu=loongarch64-linux-gnu-gcc-14 \
    QEMU_LD_PREFIX=/usr/loongarch64-linux-gnu \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,15 @@
# CI image for cross-testing mips-unknown-linux-gnu (big-endian MIPS) via the
# GCC cross toolchain and QEMU.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-mips-linux-gnu libc6-dev-mips-cross \
    binfmt-support qemu-user-static qemu-system-mips

ENV TOOLCHAIN_PREFIX=mips-linux-gnu-
ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_RUNNER=qemu-mips-static \
    AR_mips_unknown_linux_gnu="$TOOLCHAIN_PREFIX"ar \
    CC_mips_unknown_linux_gnu="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/mips-linux-gnu \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,19 @@
# CI image for cross-testing mips64-unknown-linux-gnuabi64 via the GCC cross
# toolchain and QEMU.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    ca-certificates \
    gcc \
    gcc-mips64-linux-gnuabi64 \
    libc6-dev \
    libc6-dev-mips64-cross \
    qemu-user-static \
    qemu-system-mips

ENV TOOLCHAIN_PREFIX=mips64-linux-gnuabi64-
ENV CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_RUNNER=qemu-mips64-static \
    AR_mips64_unknown_linux_gnuabi64="$TOOLCHAIN_PREFIX"ar \
    CC_mips64_unknown_linux_gnuabi64="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/mips64-linux-gnuabi64 \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,18 @@
# CI image for cross-testing mips64el-unknown-linux-gnuabi64 (little-endian)
# via the GCC cross toolchain and QEMU user-mode emulation.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    ca-certificates \
    gcc \
    gcc-mips64el-linux-gnuabi64 \
    libc6-dev \
    libc6-dev-mips64el-cross \
    qemu-user-static

ENV TOOLCHAIN_PREFIX=mips64el-linux-gnuabi64-
ENV CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_GNUABI64_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_GNUABI64_RUNNER=qemu-mips64el-static \
    AR_mips64el_unknown_linux_gnuabi64="$TOOLCHAIN_PREFIX"ar \
    CC_mips64el_unknown_linux_gnuabi64="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/mips64el-linux-gnuabi64 \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,15 @@
# CI image for cross-testing mipsel-unknown-linux-gnu (little-endian MIPS)
# via the GCC cross toolchain and QEMU user-mode emulation.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-mipsel-linux-gnu libc6-dev-mipsel-cross \
    binfmt-support qemu-user-static

ENV TOOLCHAIN_PREFIX=mipsel-linux-gnu-
ENV CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_GNU_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_GNU_RUNNER=qemu-mipsel-static \
    AR_mipsel_unknown_linux_gnu="$TOOLCHAIN_PREFIX"ar \
    CC_mipsel_unknown_linux_gnu="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/mipsel-linux-gnu \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,15 @@
# CI image for cross-testing powerpc-unknown-linux-gnu via the GCC cross
# toolchain and QEMU.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev qemu-user-static ca-certificates \
    gcc-powerpc-linux-gnu libc6-dev-powerpc-cross \
    qemu-system-ppc

ENV TOOLCHAIN_PREFIX=powerpc-linux-gnu-
ENV CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_RUNNER=qemu-ppc-static \
    AR_powerpc_unknown_linux_gnu="$TOOLCHAIN_PREFIX"ar \
    CC_powerpc_unknown_linux_gnu="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/powerpc-linux-gnu \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,15 @@
# CI image for cross-testing powerpc64-unknown-linux-gnu (big-endian) via the
# GCC cross toolchain and QEMU.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-powerpc64-linux-gnu libc6-dev-ppc64-cross \
    binfmt-support qemu-user-static qemu-system-ppc

ENV TOOLCHAIN_PREFIX=powerpc64-linux-gnu-
ENV CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_RUNNER=qemu-ppc64-static \
    AR_powerpc64_unknown_linux_gnu="$TOOLCHAIN_PREFIX"ar \
    CC_powerpc64_unknown_linux_gnu="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/powerpc64-linux-gnu \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,16 @@
# CI image for cross-testing powerpc64le-unknown-linux-gnu (little-endian) via
# the GCC cross toolchain and QEMU; pins QEMU_CPU to POWER8.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev qemu-user-static ca-certificates \
    gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross \
    qemu-system-ppc

ENV TOOLCHAIN_PREFIX=powerpc64le-linux-gnu-
ENV CARGO_TARGET_POWERPC64LE_UNKNOWN_LINUX_GNU_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_POWERPC64LE_UNKNOWN_LINUX_GNU_RUNNER=qemu-ppc64le-static \
    AR_powerpc64le_unknown_linux_gnu="$TOOLCHAIN_PREFIX"ar \
    CC_powerpc64le_unknown_linux_gnu="$TOOLCHAIN_PREFIX"gcc \
    QEMU_CPU=POWER8 \
    QEMU_LD_PREFIX=/usr/powerpc64le-linux-gnu \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,15 @@
# CI image for cross-testing riscv64gc-unknown-linux-gnu via the GCC cross
# toolchain and QEMU.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev qemu-user-static ca-certificates \
    gcc-riscv64-linux-gnu libc6-dev-riscv64-cross \
    qemu-system-riscv64

ENV TOOLCHAIN_PREFIX=riscv64-linux-gnu-
ENV CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_LINKER="$TOOLCHAIN_PREFIX"gcc \
    CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_RUNNER=qemu-riscv64-static \
    AR_riscv64gc_unknown_linux_gnu="$TOOLCHAIN_PREFIX"ar \
    CC_riscv64gc_unknown_linux_gnu="$TOOLCHAIN_PREFIX"gcc \
    QEMU_LD_PREFIX=/usr/riscv64-linux-gnu \
    RUST_TEST_THREADS=1

View file

@ -0,0 +1,9 @@
# Build-only CI image for a bare-metal thumb (arm-none-eabi) target; tests are
# not run (BUILD_ONLY=1, honored by ci/run.sh).
# NOTE(review): which thumbv* target this is for is not identifiable from this
# file alone — confirm from the directory name.
ARG IMAGE=ubuntu:24.04
FROM $IMAGE
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-arm-none-eabi \
    libnewlib-arm-none-eabi

ENV BUILD_ONLY=1

View file

@ -0,0 +1,9 @@
# Build-only CI image for a bare-metal thumb (arm-none-eabi) target; tests are
# not run (BUILD_ONLY=1, honored by ci/run.sh).
# NOTE(review): which thumbv* target this is for is not identifiable from this
# file alone — confirm from the directory name.
ARG IMAGE=ubuntu:24.04
FROM $IMAGE
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-arm-none-eabi \
    libnewlib-arm-none-eabi

ENV BUILD_ONLY=1

View file

@ -0,0 +1,9 @@
# Build-only CI image for a bare-metal thumb (arm-none-eabi) target; tests are
# not run (BUILD_ONLY=1, honored by ci/run.sh).
# NOTE(review): which thumbv* target this is for is not identifiable from this
# file alone — confirm from the directory name.
ARG IMAGE=ubuntu:24.04
FROM $IMAGE
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-arm-none-eabi \
    libnewlib-arm-none-eabi

ENV BUILD_ONLY=1

View file

@ -0,0 +1,9 @@
# Build-only CI image for a bare-metal thumb (arm-none-eabi) target; tests are
# not run (BUILD_ONLY=1, honored by ci/run.sh).
# NOTE(review): which thumbv* target this is for is not identifiable from this
# file alone — confirm from the directory name.
ARG IMAGE=ubuntu:24.04
FROM $IMAGE
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc libc6-dev ca-certificates \
    gcc-arm-none-eabi \
    libnewlib-arm-none-eabi

ENV BUILD_ONLY=1

View file

@ -0,0 +1,5 @@
# CI image for a natively-testable target: host GCC plus m4/make.
# NOTE(review): m4/make are presumably required for building musl
# (musl-math-sys) — confirm.
FROM ubuntu:24.04
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    gcc m4 make libc6-dev ca-certificates

View file

@ -0,0 +1,56 @@
#!/bin/bash
# Small script to run tests for a target (or all targets) inside all the
# respective docker images.
set -euxo pipefail

host_arch="$(uname -m | sed 's/arm64/aarch64/')"

# Build the docker image for one target and run `ci/run.sh` inside it.
# $1: bare target name, matching a directory under `ci/docker/`.
run() {
    local target=$1

    echo "testing target: $target"

    target_arch="$(echo "$target" | cut -d'-' -f1)"
    emulated=""
    if [ "$target_arch" != "$host_arch" ]; then
        emulated=1
        echo "target is emulated"
    fi

    # This directory needs to exist before calling docker, otherwise docker will create it but it
    # will be owned by root
    mkdir -p target

    set_env="HOME=/tmp PATH=\$PATH:/rust/bin:/cargo/bin"
    docker build -t "libm-$target" "ci/docker/$target"
    docker run \
        --rm \
        --user "$(id -u):$(id -g)" \
        -e CI \
        -e RUSTFLAGS \
        -e CARGO_TERM_COLOR \
        -e CARGO_HOME=/cargo \
        -e CARGO_TARGET_DIR=/target \
        -e "EMULATED=$emulated" \
        -v "${HOME}/.cargo:/cargo" \
        -v "$(pwd)/target:/target" \
        -v "$(pwd):/checkout:ro" \
        -v "$(rustc --print sysroot):/rust:ro" \
        --init \
        -w /checkout \
        "libm-$target" \
        sh -c "$set_env exec ci/run.sh $target"
}

# Use `${1:-}`: under `set -u`, a bare `$1` aborts the script when no
# argument is given instead of running all targets.
if [ -z "${1:-}" ]; then
    echo "running tests for all targets"
    for d in ci/docker/*; do
        # `$d` is the full path `ci/docker/<target>`, but `run` expects the
        # bare target name (it re-joins it with `ci/docker/` itself), so
        # strip the directory prefix.
        run "$(basename "$d")"
    done
else
    run "$1"
fi

View file

@ -0,0 +1,130 @@
#!/bin/sh
# Build and test libm for a single target, choosing features (musl comparison,
# MPFR comparison, nextest) based on what the target supports.
set -eux

export RUST_BACKTRACE="${RUST_BACKTRACE:-full}"
export NEXTEST_STATUS_LEVEL=all

target="${1:-}"
flags=""

if [ -z "$target" ]; then
    host_target=$(rustc -vV | awk '/^host/ { print $2 }')
    echo "Defaulted to host target $host_target"
    target="$host_target"
fi

# We enumerate features manually.
flags="$flags --no-default-features"

# Enable arch-specific routines when available.
flags="$flags --features arch"

# Always enable `unstable-float` since it expands available API but does not
# change any implementations.
flags="$flags --features unstable-float"

# We need to specifically skip tests for musl-math-sys on systems that can't
# build musl since otherwise `--all` will activate it.
case "$target" in
    # Can't build at all on MSVC, WASM, or thumb
    *windows-msvc*) flags="$flags --exclude musl-math-sys" ;;
    *wasm*) flags="$flags --exclude musl-math-sys" ;;
    *thumb*) flags="$flags --exclude musl-math-sys" ;;

    # We can build musl on MinGW but running tests gets a stack overflow
    *windows-gnu*) ;;

    # FIXME(#309): LE PPC crashes calling the musl version of some functions. It
    # seems like a qemu bug but should be investigated further at some point.
    # See <https://github.com/rust-lang/libm/issues/309>.
    *powerpc64le*) ;;

    # Everything else gets musl enabled
    *) flags="$flags --features libm-test/build-musl" ;;
esac

# Configure which targets test against MPFR
case "$target" in
    # MSVC cannot link MPFR
    *windows-msvc*) ;;
    # FIXME: MinGW should be able to build MPFR, but setup in CI is nontrivial.
    *windows-gnu*) ;;
    # Targets that aren't cross compiled in CI work fine
    aarch64*apple*) flags="$flags --features libm-test/build-mpfr" ;;
    aarch64*linux*) flags="$flags --features libm-test/build-mpfr" ;;
    i586*) flags="$flags --features libm-test/build-mpfr --features gmp-mpfr-sys/force-cross" ;;
    i686*) flags="$flags --features libm-test/build-mpfr" ;;
    x86_64*) flags="$flags --features libm-test/build-mpfr" ;;
esac

# FIXME: `STATUS_DLL_NOT_FOUND` testing macros on CI.
# <https://github.com/rust-lang/rust/issues/128944>
case "$target" in
    *windows-gnu) flags="$flags --exclude libm-macros" ;;
esac

# Make sure we can build with overriding features.
cargo check -p libm --no-default-features

if [ "${BUILD_ONLY:-}" = "1" ]; then
    # If we are on targets that can't run tests, verify that we can build.
    cmd="cargo build --target $target --package libm"
    $cmd
    $cmd --features unstable-intrinsics

    echo "can't run tests on $target; skipping"
    exit
fi

flags="$flags --all --target $target"
cmd="cargo test $flags"
profile="--profile"

# If nextest is available, use that
command -v cargo-nextest && nextest=1 || nextest=0
if [ "$nextest" = "1" ]; then
    # Workaround for https://github.com/nextest-rs/nextest/issues/2066
    if [ -f /.dockerenv ]; then
        cfg_file="/tmp/nextest-config.toml"
        echo "[store]" >> "$cfg_file"
        echo "dir = \"$CARGO_TARGET_DIR/nextest\"" >> "$cfg_file"
        cfg_flag="--config-file $cfg_file"
    fi

    cmd="cargo nextest run ${cfg_flag:-} --max-fail=10 $flags"
    profile="--cargo-profile"
fi

# Test once without intrinsics
$cmd

# Run doctests if they were excluded by nextest
[ "$nextest" = "1" ] && cargo test --doc $flags

# Exclude the macros and `util` crates from the rest of the tests to save CI
# runtime, they shouldn't have anything feature- or opt-level-dependent.
cmd="$cmd --exclude util --exclude libm-macros"

# Test once with intrinsics enabled
$cmd --features unstable-intrinsics
$cmd --features unstable-intrinsics --benches

# Test the same in release mode, which also increases coverage. Also ensure
# the soft float routines are checked.
$cmd "$profile" release-checked
$cmd "$profile" release-checked --features force-soft-floats
$cmd "$profile" release-checked --features unstable-intrinsics
$cmd "$profile" release-checked --features unstable-intrinsics --benches

# Ensure that the routines do not panic.
#
# `--tests` must be passed because no-panic is only enabled as a dev
# dependency. The `release-opt` profile must be used to enable LTO and a
# single CGU.
ENSURE_NO_PANIC=1 cargo build \
    -p libm \
    --target "$target" \
    --no-default-features \
    --features unstable-float \
    --tests \
    --profile release-opt

View file

@ -0,0 +1,38 @@
[package]
name = "cb"
version = "0.1.0"
authors = ["Jorge Aparicio <jorge@japaric.io>"]
edition = "2021"
publish = false
[lib]
crate-type = ["staticlib"]
test = false
bench = false
[features]
default = ["arch", "compiler-builtins", "unstable-float"]
# Copied from `libm`'s root `Cargo.toml`
arch = []
compiler-builtins = []
unstable-float = []
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = [
"cfg(arch_enabled)",
"cfg(assert_no_panic)",
"cfg(intrinsics_enabled)",
'cfg(feature, values("force-soft-floats"))',
'cfg(feature, values("unstable"))',
'cfg(feature, values("unstable-intrinsics"))',
'cfg(feature, values("unstable-public-internals"))',
] }
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
codegen-units = 1
lto = "fat"

View file

@ -0,0 +1,8 @@
// Reuse `libm`'s configure script so this crate is configured with the same
// cfg flags as `libm` itself.
#[path = "../../libm/configure.rs"]
mod configure;

fn main() {
    // Re-run the build script whenever the shared configure script changes.
    println!("cargo:rerun-if-changed=../../libm/configure.rs");
    let cfg = configure::Config::from_env();
    configure::emit_libm_config(&cfg);
}

View file

@ -0,0 +1,17 @@
//! Fake compiler-builtins crate
//!
//! This is used to test that we can source import `libm` into the compiler-builtins crate.
//! Additionally, it provides a `#[no_mangle]` C API that can be easier to inspect than the
//! default `.rlib`.

// Nightly-only attributes/features required to masquerade as the real
// compiler-builtins crate and to expose `f16`/`f128` routines.
#![compiler_builtins]
#![feature(core_intrinsics)]
#![feature(compiler_builtins)]
#![feature(f16)]
#![feature(f128)]
#![allow(internal_features)]
#![no_std]

mod math;

// Required for macro paths.
use math::libm::support;

View file

@ -0,0 +1,182 @@
use core::ffi::c_int;

// Compile `libm`'s math sources directly into this crate via a path attribute.
#[allow(dead_code)]
#[allow(clippy::all)] // We don't get `libm`'s list of `allow`s, so just ignore Clippy.
#[allow(unused_imports)]
#[path = "../../../libm/src/math/mod.rs"]
pub mod libm;

/// Mark functions `#[no_mangle]` and with the C ABI.
macro_rules! no_mangle {
    // Entry point: expand each `name(args...) -> ret;` item individually.
    ($( $name:ident( $($tt:tt)+ ) -> $ret:ty; )+) => {
        $( no_mangle!(@inner $name( $($tt)+ ) -> $ret); )+
    };

    // Handle simple functions with single return types
    (@inner $name:ident( $($arg:ident: $aty:ty),+ ) -> $ret:ty) => {
        #[unsafe(no_mangle)]
        extern "C" fn $name($($arg: $aty),+) -> $ret {
            libm::$name($($arg),+)
        }
    };

    // Functions with `&mut` return values need to be handled differently, use `|` to
    // separate inputs vs. outputs.
    (
        @inner $name:ident( $($arg:ident: $aty:ty),+ | $($rarg:ident: $rty:ty),+) -> $ret:ty
    ) => {
        #[unsafe(no_mangle)]
        extern "C" fn $name($($arg: $aty,)+ $($rarg: $rty),+) -> $ret {
            // `libm` returns a tuple; write the extra values through the
            // caller-provided `&mut` out-parameters, C-style.
            let ret;
            (ret, $(*$rarg),+) = libm::$name($($arg),+);
            ret
        }
    };
}
// Instantiate the C-ABI `#[no_mangle]` wrappers for every exported function. Entries with
// a `|` separator have out-pointer arguments (see the macro's second `@inner` rule).
no_mangle! {
frexp(x: f64 | y: &mut c_int) -> f64;
frexpf(x: f32 | y: &mut c_int) -> f32;
acos(x: f64) -> f64;
acosf(x: f32) -> f32;
acosh(x: f64) -> f64;
acoshf(x: f32) -> f32;
asin(x: f64) -> f64;
asinf(x: f32) -> f32;
asinh(x: f64) -> f64;
asinhf(x: f32) -> f32;
atan(x: f64) -> f64;
atan2(x: f64, y: f64) -> f64;
atan2f(x: f32, y: f32) -> f32;
atanf(x: f32) -> f32;
atanh(x: f64) -> f64;
atanhf(x: f32) -> f32;
cbrt(x: f64) -> f64;
cbrtf(x: f32) -> f32;
ceil(x: f64) -> f64;
ceilf(x: f32) -> f32;
ceilf128(x: f128) -> f128;
ceilf16(x: f16) -> f16;
copysign(x: f64, y: f64) -> f64;
copysignf(x: f32, y: f32) -> f32;
copysignf128(x: f128, y: f128) -> f128;
copysignf16(x: f16, y: f16) -> f16;
cos(x: f64) -> f64;
cosf(x: f32) -> f32;
cosh(x: f64) -> f64;
coshf(x: f32) -> f32;
erf(x: f64) -> f64;
erfc(x: f64) -> f64;
erfcf(x: f32) -> f32;
erff(x: f32) -> f32;
exp(x: f64) -> f64;
exp10(x: f64) -> f64;
exp10f(x: f32) -> f32;
exp2(x: f64) -> f64;
exp2f(x: f32) -> f32;
expf(x: f32) -> f32;
expm1(x: f64) -> f64;
expm1f(x: f32) -> f32;
fabs(x: f64) -> f64;
fabsf(x: f32) -> f32;
fabsf128(x: f128) -> f128;
fabsf16(x: f16) -> f16;
fdim(x: f64, y: f64) -> f64;
fdimf(x: f32, y: f32) -> f32;
fdimf128(x: f128, y: f128) -> f128;
fdimf16(x: f16, y: f16) -> f16;
floor(x: f64) -> f64;
floorf(x: f32) -> f32;
floorf128(x: f128) -> f128;
floorf16(x: f16) -> f16;
fma(x: f64, y: f64, z: f64) -> f64;
fmaf(x: f32, y: f32, z: f32) -> f32;
fmax(x: f64, y: f64) -> f64;
fmaxf(x: f32, y: f32) -> f32;
fmin(x: f64, y: f64) -> f64;
fminf(x: f32, y: f32) -> f32;
fmod(x: f64, y: f64) -> f64;
fmodf(x: f32, y: f32) -> f32;
hypot(x: f64, y: f64) -> f64;
hypotf(x: f32, y: f32) -> f32;
ilogb(x: f64) -> c_int;
ilogbf(x: f32) -> c_int;
j0(x: f64) -> f64;
j0f(x: f32) -> f32;
j1(x: f64) -> f64;
j1f(x: f32) -> f32;
jn(x: c_int, y: f64) -> f64;
jnf(x: c_int, y: f32) -> f32;
ldexp(x: f64, y: c_int) -> f64;
ldexpf(x: f32, y: c_int) -> f32;
lgamma(x: f64) -> f64;
lgamma_r(x: f64 | r: &mut c_int) -> f64;
lgammaf(x: f32) -> f32;
lgammaf_r(x: f32 | r: &mut c_int) -> f32;
log(x: f64) -> f64;
log10(x: f64) -> f64;
log10f(x: f32) -> f32;
log1p(x: f64) -> f64;
log1pf(x: f32) -> f32;
log2(x: f64) -> f64;
log2f(x: f32) -> f32;
logf(x: f32) -> f32;
modf(x: f64 | r: &mut f64) -> f64;
modff(x: f32 | r: &mut f32) -> f32;
nextafter(x: f64, y: f64) -> f64;
nextafterf(x: f32, y: f32) -> f32;
pow(x: f64, y: f64) -> f64;
powf(x: f32, y: f32) -> f32;
remainder(x: f64, y: f64) -> f64;
remainderf(x: f32, y: f32) -> f32;
remquo(x: f64, y: f64 | q: &mut c_int) -> f64;
remquof(x: f32, y: f32 | q: &mut c_int) -> f32;
rint(x: f64) -> f64;
rintf(x: f32) -> f32;
rintf128(x: f128) -> f128;
rintf16(x: f16) -> f16;
round(x: f64) -> f64;
roundf(x: f32) -> f32;
scalbn(x: f64, y: c_int) -> f64;
scalbnf(x: f32, y: c_int) -> f32;
sin(x: f64) -> f64;
sinf(x: f32) -> f32;
sinh(x: f64) -> f64;
sinhf(x: f32) -> f32;
sqrt(x: f64) -> f64;
sqrtf(x: f32) -> f32;
tan(x: f64) -> f64;
tanf(x: f32) -> f32;
tanh(x: f64) -> f64;
tanhf(x: f32) -> f32;
tgamma(x: f64) -> f64;
tgammaf(x: f32) -> f32;
trunc(x: f64) -> f64;
truncf(x: f32) -> f32;
truncf128(x: f128) -> f128;
truncf16(x: f16) -> f16;
y0(x: f64) -> f64;
y0f(x: f32) -> f32;
y1(x: f64) -> f64;
y1f(x: f32) -> f32;
yn(x: c_int, y: f64) -> f64;
ynf(x: c_int, y: f32) -> f32;
}
/* sincos has no direct return type, not worth handling in the macro */
#[unsafe(no_mangle)]
extern "C" fn sincos(x: f64, s: &mut f64, c: &mut f64) {
    // Unpack the Rust tuple result into the two C-style out-pointers.
    let (sin_x, cos_x) = libm::sincos(x);
    *s = sin_x;
    *c = cos_x;
}
#[unsafe(no_mangle)]
extern "C" fn sincosf(x: f32, s: &mut f32, c: &mut f32) {
    // Unpack the Rust tuple result into the two C-style out-pointers.
    let (sin_x, cos_x) = libm::sincosf(x);
    *s = sin_x;
    *c = cos_x;
}
// Minimal panic handler required for this `#![no_std]` crate: spin forever.
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
loop {}
}

View file

@ -0,0 +1,21 @@
[package]
name = "libm-macros"
version = "0.1.0"
edition = "2024"
publish = false
[lib]
proc-macro = true
[dependencies]
heck = "0.5.0"
proc-macro2 = "1.0.94"
quote = "1.0.40"
syn = { version = "2.0.100", features = ["full", "extra-traits", "visit-mut"] }
[lints.rust]
# Values used during testing
unexpected_cfgs = { level = "warn", check-cfg = [
'cfg(f16_enabled)',
'cfg(f128_enabled)',
] }

View file

@ -0,0 +1,154 @@
use heck::ToUpperCamelCase;
use proc_macro2 as pm2;
use proc_macro2::{Ident, Span};
use quote::quote;
use syn::spanned::Spanned;
use syn::{Fields, ItemEnum, Variant};
use crate::{ALL_OPERATIONS, base_name};
/// Implement `#[function_enum]`, see documentation in `lib.rs`.
///
/// Adds one unit variant per operation in `ALL_OPERATIONS` to the annotated (empty) enum
/// and generates `ALL`, `as_str`, `from_str`, `base_name`, and `math_op` helpers for it.
pub fn function_enum(
mut item: ItemEnum,
attributes: pm2::TokenStream,
) -> syn::Result<pm2::TokenStream> {
expect_empty_enum(&item)?;
let attr_span = attributes.span();
let mut attr = attributes.into_iter();
// Attribute should be the identifier of the `BaseName` enum.
let Some(tt) = attr.next() else {
return Err(syn::Error::new(attr_span, "expected one attribute"));
};
let pm2::TokenTree::Ident(base_enum) = tt else {
return Err(syn::Error::new(tt.span(), "expected an identifier"));
};
// Exactly one token is expected; anything after the enum name is an error.
if let Some(tt) = attr.next() {
return Err(syn::Error::new(tt.span(), "unexpected token after identifier"));
}
let enum_name = &item.ident;
let mut as_str_arms = Vec::new();
let mut from_str_arms = Vec::new();
let mut base_arms = Vec::new();
for func in ALL_OPERATIONS.iter() {
let fn_name = func.name;
// Variant identifier, e.g. `sinf` -> `Sinf`.
let ident = Ident::new(&fn_name.to_upper_camel_case(), Span::call_site());
let bname_ident = Ident::new(&base_name(fn_name).to_upper_camel_case(), Span::call_site());
// Match arm for `fn as_str(self)` matcher
as_str_arms.push(quote! { Self::#ident => #fn_name });
from_str_arms.push(quote! { #fn_name => Self::#ident });
// Match arm for `fn base_name(self)` matcher
base_arms.push(quote! { Self::#ident => #base_enum::#bname_ident });
// Add the unit variant to the enum itself.
let variant =
Variant { attrs: Vec::new(), ident, fields: Fields::Unit, discriminant: None };
item.variants.push(variant);
}
let variants = item.variants.iter();
let res = quote! {
// Instantiate the enum
#item
impl #enum_name {
/// All variants of this enum.
pub const ALL: &[Self] = &[
#( Self::#variants, )*
];
/// The stringified version of this function name.
pub const fn as_str(self) -> &'static str {
match self {
#( #as_str_arms , )*
}
}
/// If `s` is the name of a function, return it.
pub fn from_str(s: &str) -> Option<Self> {
let ret = match s {
#( #from_str_arms , )*
_ => return None,
};
Some(ret)
}
/// The base name enum for this function.
pub const fn base_name(self) -> #base_enum {
match self {
#( #base_arms, )*
}
}
/// Return information about this operation.
pub fn math_op(self) -> &'static crate::op::MathOpInfo {
crate::op::ALL_OPERATIONS.iter().find(|op| op.name == self.as_str()).unwrap()
}
}
};
Ok(res)
}
/// Implement `#[base_name_enum]`, see documentation in `lib.rs`.
pub fn base_name_enum(
mut item: ItemEnum,
attributes: pm2::TokenStream,
) -> syn::Result<pm2::TokenStream> {
expect_empty_enum(&item)?;
if !attributes.is_empty() {
let sp = attributes.span();
return Err(syn::Error::new(sp.span(), "no attributes expected"));
}
let mut base_names: Vec<_> = ALL_OPERATIONS.iter().map(|func| base_name(func.name)).collect();
base_names.sort_unstable();
base_names.dedup();
let item_name = &item.ident;
let mut as_str_arms = Vec::new();
for base_name in base_names {
let ident = Ident::new(&base_name.to_upper_camel_case(), Span::call_site());
// Match arm for `fn as_str(self)` matcher
as_str_arms.push(quote! { Self::#ident => #base_name });
let variant =
Variant { attrs: Vec::new(), ident, fields: Fields::Unit, discriminant: None };
item.variants.push(variant);
}
let res = quote! {
// Instantiate the enum
#item
impl #item_name {
/// The stringified version of this base name.
pub const fn as_str(self) -> &'static str {
match self {
#( #as_str_arms ),*
}
}
}
};
Ok(res)
}
/// Verify that an enum is empty, otherwise return an error
fn expect_empty_enum(item: &ItemEnum) -> syn::Result<()> {
    if item.variants.is_empty() {
        return Ok(());
    }
    Err(syn::Error::new(item.variants.span(), "expected an empty enum"))
}

View file

@ -0,0 +1,446 @@
mod enums;
mod parse;
mod shared;
use parse::{Invocation, StructuredInput};
use proc_macro as pm;
use proc_macro2::{self as pm2, Span};
use quote::{ToTokens, quote};
pub(crate) use shared::{ALL_OPERATIONS, FloatTy, MathOpInfo, Ty};
use syn::spanned::Spanned;
use syn::visit_mut::VisitMut;
use syn::{Ident, ItemEnum};
/// Type identifiers that callers may request via `emit_types`; validated in `validate`.
const KNOWN_TYPES: &[&str] = &["FTy", "CFn", "CArgs", "CRet", "RustFn", "RustArgs", "RustRet"];
/// Populate an enum with a variant representing function. Names are in upper camel case.
///
/// Applied to an empty enum. Expects one attribute `#[function_enum(BaseName)]` that provides
/// the name of the `BaseName` enum.
#[proc_macro_attribute]
pub fn function_enum(attributes: pm::TokenStream, tokens: pm::TokenStream) -> pm::TokenStream {
let item = syn::parse_macro_input!(tokens as ItemEnum);
let res = enums::function_enum(item, attributes.into());
match res {
Ok(ts) => ts,
Err(e) => e.into_compile_error(),
}
.into()
}
/// Create an enum representing all possible base names, with names in upper camel case.
///
/// Applied to an empty enum.
#[proc_macro_attribute]
pub fn base_name_enum(attributes: pm::TokenStream, tokens: pm::TokenStream) -> pm::TokenStream {
let item = syn::parse_macro_input!(tokens as ItemEnum);
let res = enums::base_name_enum(item, attributes.into());
match res {
Ok(ts) => ts,
Err(e) => e.into_compile_error(),
}
.into()
}
/// Do something for each function present in this crate.
///
/// Takes a callback macro and invokes it multiple times, once for each function that
/// this crate exports. This makes it easy to create generic tests, benchmarks, or other checks
/// and apply it to each symbol.
///
/// Additionally, the `extra` and `fn_extra` patterns can make use of magic identifiers:
///
/// - `MACRO_FN_NAME`: gets replaced with the name of the function on that invocation.
/// - `MACRO_FN_NAME_NORMALIZED`: similar to the above, but removes suffixes so e.g. `sinf` becomes
/// `sin`, `cosf128` becomes `cos`, etc.
///
/// Invoke as:
///
/// ```
/// // Macro that is invoked once per function
/// macro_rules! callback_macro {
/// (
/// // Name of that function
/// fn_name: $fn_name:ident,
/// // The basic float type for this function (e.g. `f32`, `f64`)
/// FTy: $FTy:ty,
/// // Function signature of the C version (e.g. `fn(f32, &mut f32) -> f32`)
/// CFn: $CFn:ty,
/// // A tuple representing the C version's arguments (e.g. `(f32, &mut f32)`)
/// CArgs: $CArgs:ty,
/// // The C version's return type (e.g. `f32`)
/// CRet: $CRet:ty,
/// // Function signature of the Rust version (e.g. `fn(f32) -> (f32, f32)`)
/// RustFn: $RustFn:ty,
/// // A tuple representing the Rust version's arguments (e.g. `(f32,)`)
/// RustArgs: $RustArgs:ty,
/// // The Rust version's return type (e.g. `(f32, f32)`)
/// RustRet: $RustRet:ty,
/// // Attributes for the current function, if any
/// attrs: [$($attr:meta),*],
/// // Extra tokens passed directly (if any)
/// extra: [$extra:ident],
/// // Extra function-tokens passed directly (if any)
/// fn_extra: $fn_extra:expr,
/// ) => { };
/// }
///
/// // All fields except for `callback` are optional.
/// libm_macros::for_each_function! {
/// // The macro to invoke as a callback
/// callback: callback_macro,
/// // Which types to include either as a list (`[CFn, RustFn, RustArgs]`) or "all"
/// emit_types: all,
/// // Functions to skip, i.e. `callback` shouldn't be called at all for these.
/// skip: [sin, cos],
/// // Attributes passed as `attrs` for specific functions. For example, here the invocation
/// // with `sinf` and that with `cosf` will both get `meta1` and `meta2`, but no others will.
/// //
/// // Note that `f16_enabled` and `f128_enabled` will always get emitted regardless of whether
/// // or not this is specified.
/// attributes: [
/// #[meta1]
/// #[meta2]
/// [sinf, cosf],
/// ],
/// // Any tokens that should be passed directly to all invocations of the callback. This can
/// // be used to pass local variables or other things the macro needs access to.
/// extra: [foo],
/// // Similar to `extra`, but allow providing a pattern for only specific functions. Uses
/// // a simplified match-like syntax.
/// fn_extra: match MACRO_FN_NAME {
/// hypot | hypotf => |x| x.hypot(),
/// _ => |x| x,
/// },
/// }
/// ```
#[proc_macro]
pub fn for_each_function(tokens: pm::TokenStream) -> pm::TokenStream {
    let input = syn::parse_macro_input!(tokens as Invocation);
    // Parse the structured fields, validate them, then expand one callback per function.
    let expanded = StructuredInput::from_fields(input).and_then(|mut s_in| {
        let fn_list = validate(&mut s_in)?;
        expand(s_in, &fn_list)
    });
    match expanded {
        Ok(ts) => ts.into(),
        Err(e) => e.into_compile_error().into(),
    }
}
/// Check for any input that is structurally correct but has other problems.
///
/// Returns the list of function names that we should expand for.
fn validate(input: &mut StructuredInput) -> syn::Result<Vec<&'static MathOpInfo>> {
    // Collect lists of all functions that are provided as macro inputs in various fields
    // (only, skip, attributes, fn_extra).
    let attr_mentions = input
        .attributes
        .iter()
        .flat_map(|map_list| map_list.iter())
        .flat_map(|attr_map| attr_map.names.iter());
    let only_mentions = input.only.iter().flat_map(|only_list| only_list.iter());
    let fn_extra_mentions =
        input.fn_extra.iter().flat_map(|v| v.keys()).filter(|name| *name != "_");
    let all_mentioned_fns =
        input.skip.iter().chain(only_mentions).chain(attr_mentions).chain(fn_extra_mentions);

    // Make sure that every function mentioned is a real function
    for mentioned in all_mentioned_fns {
        if !ALL_OPERATIONS.iter().any(|func| mentioned == func.name) {
            let e = syn::Error::new(
                mentioned.span(),
                format!("unrecognized function name `{mentioned}`"),
            );
            return Err(e);
        }
    }

    // `skip` and `only` are mutually exclusive.
    if !input.skip.is_empty() && input.only.is_some() {
        let e = syn::Error::new(
            input.only_span.unwrap(),
            "only one of `skip` or `only` may be specified",
        );
        return Err(e);
    }

    // Construct a list of what we intend to expand
    let mut fn_list = Vec::new();
    for func in ALL_OPERATIONS.iter() {
        let fn_name = func.name;

        // If we have an `only` list and it does _not_ contain this function name, skip it
        if input.only.as_ref().is_some_and(|only| !only.iter().any(|o| o == fn_name)) {
            continue;
        }

        // If there is a `skip` list that contains this function name, skip it
        if input.skip.iter().any(|s| s == fn_name) {
            continue;
        }

        // Run everything else
        fn_list.push(func);
    }

    // Types that the user would like us to provide in the macro
    let mut add_all_types = false;
    for ty in &input.emit_types {
        let ty_name = ty.to_string();
        if ty_name == "all" {
            add_all_types = true;
            continue;
        }

        // Check that all requested types are valid
        if !KNOWN_TYPES.contains(&ty_name.as_str()) {
            // Use the `Ident`'s span for the error: `ty_name` is a plain `String`, which
            // carries no span information (and has no `.span()` method).
            let e = syn::Error::new(
                ty.span(),
                format!("unrecognized type identifier `{ty_name}`"),
            );
            return Err(e);
        }
    }

    if add_all_types {
        // Ensure that if `all` was specified that nothing else was
        if input.emit_types.len() > 1 {
            let e = syn::Error::new(
                input.emit_types_span.unwrap(),
                "if `all` is specified, no other type identifiers may be given",
            );
            return Err(e);
        }

        // ...and then add all types
        input.emit_types.clear();
        for ty in KNOWN_TYPES {
            let ident = Ident::new(ty, Span::call_site());
            input.emit_types.push(ident);
        }
    }

    if let Some(map) = &input.fn_extra {
        if !map.keys().any(|key| key == "_") {
            // No default provided; make sure every expected function is covered
            let mut fns_not_covered = Vec::new();
            for func in &fn_list {
                if !map.keys().any(|key| key == func.name) {
                    // `name` was not mentioned in the `match` statement
                    fns_not_covered.push(func);
                }
            }

            if !fns_not_covered.is_empty() {
                let e = syn::Error::new(
                    input.fn_extra_span.unwrap(),
                    format!(
                        "`fn_extra`: no default `_` pattern specified and the following \
                         patterns are not covered: {fns_not_covered:#?}"
                    ),
                );
                return Err(e);
            }
        }
    };

    Ok(fn_list)
}
/// Expand our structured macro input into invocations of the callback macro.
fn expand(input: StructuredInput, fn_list: &[&MathOpInfo]) -> syn::Result<pm2::TokenStream> {
let mut out = pm2::TokenStream::new();
let default_ident = Ident::new("_", Span::call_site());
let callback = input.callback;
for func in fn_list {
let fn_name = Ident::new(func.name, Span::call_site());
// Prepare attributes in an `attrs: ...` field
let mut meta_fields = Vec::new();
if let Some(attrs) = &input.attributes {
// Include the metadata of every attribute map that names this function.
let meta_iter = attrs
.iter()
.filter(|map| map.names.contains(&fn_name))
.flat_map(|map| &map.meta)
.map(|v| v.into_token_stream());
meta_fields.extend(meta_iter);
}
// Always emit f16 and f128 meta so this doesn't need to be repeated everywhere
if func.rust_sig.args.contains(&Ty::F16) || func.rust_sig.returns.contains(&Ty::F16) {
let ts = quote! { cfg(f16_enabled) };
meta_fields.push(ts);
}
if func.rust_sig.args.contains(&Ty::F128) || func.rust_sig.returns.contains(&Ty::F128) {
let ts = quote! { cfg(f128_enabled) };
meta_fields.push(ts);
}
let meta_field = quote! { attrs: [ #( #meta_fields ),* ], };
// Prepare extra in an `extra: ...` field, running the replacer
let extra_field = match input.extra.clone() {
Some(mut extra) => {
let mut v = MacroReplace::new(func.name);
v.visit_expr_mut(&mut extra);
v.finish()?;
quote! { extra: #extra, }
}
None => pm2::TokenStream::new(),
};
// Prepare function-specific extra in a `fn_extra: ...` field, running the replacer
let fn_extra_field = match input.fn_extra {
Some(ref map) => {
// Use this function's entry if present, otherwise the `_` default; `validate`
// already guaranteed that one of the two exists, so `unwrap` cannot fail.
let mut fn_extra =
map.get(&fn_name).or_else(|| map.get(&default_ident)).unwrap().clone();
let mut v = MacroReplace::new(func.name);
v.visit_expr_mut(&mut fn_extra);
v.finish()?;
quote! { fn_extra: #fn_extra, }
}
None => pm2::TokenStream::new(),
};
let base_fty = func.float_ty;
let c_args = &func.c_sig.args;
let c_ret = &func.c_sig.returns;
let rust_args = &func.rust_sig.args;
let rust_ret = &func.rust_sig.returns;
// Emit only the type aliases the caller requested (already validated against
// `KNOWN_TYPES` in `validate`).
let mut ty_fields = Vec::new();
for ty in &input.emit_types {
let field = match ty.to_string().as_str() {
"FTy" => quote! { FTy: #base_fty, },
"CFn" => quote! { CFn: fn( #(#c_args),* ,) -> ( #(#c_ret),* ), },
"CArgs" => quote! { CArgs: ( #(#c_args),* ,), },
"CRet" => quote! { CRet: ( #(#c_ret),* ), },
"RustFn" => quote! { RustFn: fn( #(#rust_args),* ,) -> ( #(#rust_ret),* ), },
"RustArgs" => quote! { RustArgs: ( #(#rust_args),* ,), },
"RustRet" => quote! { RustRet: ( #(#rust_ret),* ), },
_ => unreachable!("checked in validation"),
};
ty_fields.push(field);
}
// One invocation of the callback macro per function.
let new = quote! {
#callback! {
fn_name: #fn_name,
#( #ty_fields )*
#meta_field
#extra_field
#fn_extra_field
}
};
out.extend(new);
}
Ok(out)
}
/// Visitor to replace "magic" identifiers that we allow: `MACRO_FN_NAME` and
/// `MACRO_FN_NAME_NORMALIZED`.
struct MacroReplace {
/// Substituted for `MACRO_FN_NAME`: the function name of the current invocation.
fn_name: &'static str,
/// Remove the trailing `f` or `f128` to make the base name (via `base_name`);
/// substituted for `MACRO_FN_NAME_NORMALIZED`.
norm_name: String,
/// First error encountered while visiting, if any; surfaced by `finish`.
error: Option<syn::Error>,
}
impl MacroReplace {
fn new(name: &'static str) -> Self {
let norm_name = base_name(name);
Self { fn_name: name, norm_name: norm_name.to_owned(), error: None }
}
fn finish(self) -> syn::Result<()> {
match self.error {
Some(e) => Err(e),
None => Ok(()),
}
}
fn visit_ident_inner(&mut self, i: &mut Ident) {
let s = i.to_string();
if !s.starts_with("MACRO") || self.error.is_some() {
return;
}
match s.as_str() {
"MACRO_FN_NAME" => *i = Ident::new(self.fn_name, i.span()),
"MACRO_FN_NAME_NORMALIZED" => *i = Ident::new(&self.norm_name, i.span()),
_ => {
self.error =
Some(syn::Error::new(i.span(), format!("unrecognized meta expression `{s}`")));
}
}
}
}
impl VisitMut for MacroReplace {
// Perform the replacement, then recurse with the default visitor behavior.
fn visit_ident_mut(&mut self, i: &mut Ident) {
self.visit_ident_inner(i);
syn::visit_mut::visit_ident_mut(self, i);
}
}
/// Return the unsuffixed version of a function name; e.g. `abs` and `absf` both return `abs`,
/// `lgamma_r` and `lgammaf_r` both return `lgamma_r`.
fn base_name(name: &str) -> &str {
    // Names where plain suffix stripping would be wrong (e.g. `erf` must not become `er`).
    const KNOWN_MAPPINGS: &[(&str, &str)] = &[
        ("erff", "erf"),
        ("erf", "erf"),
        ("lgammaf_r", "lgamma_r"),
        ("modff", "modf"),
        ("modf", "modf"),
    ];

    if let Some(&(_, base)) = KNOWN_MAPPINGS.iter().find(|&&(full, _)| full == name) {
        return base;
    }

    // Otherwise drop a float-width suffix if one is present; unsuffixed names pass through.
    ["f", "f16", "f128"]
        .iter()
        .find_map(|suffix| name.strip_suffix(suffix))
        .unwrap_or(name)
}
// Emit the Rust source form of each `Ty` so values can be interpolated directly into
// `quote!`-generated signatures.
impl ToTokens for Ty {
fn to_tokens(&self, tokens: &mut pm2::TokenStream) {
let ts = match self {
Ty::F16 => quote! { f16 },
Ty::F32 => quote! { f32 },
Ty::F64 => quote! { f64 },
Ty::F128 => quote! { f128 },
Ty::I32 => quote! { i32 },
Ty::CInt => quote! { ::core::ffi::c_int },
Ty::MutF16 => quote! { &'a mut f16 },
Ty::MutF32 => quote! { &'a mut f32 },
Ty::MutF64 => quote! { &'a mut f64 },
Ty::MutF128 => quote! { &'a mut f128 },
Ty::MutI32 => quote! { &'a mut i32 },
Ty::MutCInt => quote! { &'a mut core::ffi::c_int },
};
tokens.extend(ts);
}
}
// Emit the Rust source form of each `FloatTy`, mirroring the `Ty` impl above.
impl ToTokens for FloatTy {
fn to_tokens(&self, tokens: &mut pm2::TokenStream) {
let ts = match self {
FloatTy::F16 => quote! { f16 },
FloatTy::F32 => quote! { f32 },
FloatTy::F64 => quote! { f64 },
FloatTy::F128 => quote! { f128 },
};
tokens.extend(ts);
}
}

View file

@ -0,0 +1,258 @@
use std::collections::BTreeMap;
use proc_macro2::Span;
use quote::ToTokens;
use syn::parse::{Parse, ParseStream, Parser};
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::token::{self, Comma};
use syn::{Arm, Attribute, Expr, ExprMatch, Ident, Meta, Token, bracketed};
/// The input to our macro; just a list of `field: value` items.
#[derive(Debug)]
pub struct Invocation {
/// Comma-separated `name: expr` pairs, in the order written by the caller.
fields: Punctuated<Mapping, Comma>,
}
impl Parse for Invocation {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        // The whole input is a comma-separated list of mappings.
        let fields = input.parse_terminated(Mapping::parse, Token![,])?;
        Ok(Self { fields })
    }
}
/// A `key: expression` mapping with nothing else. Basically a simplified `syn::Field`.
#[derive(Debug)]
struct Mapping {
/// The field name, e.g. `callback`.
name: Ident,
/// The `:` separator; kept only so parsing consumes it.
_sep: Token![:],
/// The value assigned to this field.
expr: Expr,
}
impl Parse for Mapping {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        // Parse in source order: name, then `:`, then the value expression.
        let name = input.parse()?;
        let _sep = input.parse()?;
        let expr = input.parse()?;
        Ok(Self { name, _sep, expr })
    }
}
/// The input provided to our proc macro, after parsing into the form we expect.
#[derive(Debug)]
pub struct StructuredInput {
/// Macro to invoke once per function
pub callback: Ident,
/// Whether or not to provide `CFn` `CArgs` `RustFn` etc. This is really only needed
/// once, for the crate that sets up the main trait.
pub emit_types: Vec<Ident>,
/// Skip these functions
pub skip: Vec<Ident>,
/// Invoke only for these functions
pub only: Option<Vec<Ident>>,
/// Attributes that get applied to specific functions
pub attributes: Option<Vec<AttributeMap>>,
/// Extra expressions to pass to all invocations of the macro
pub extra: Option<Expr>,
/// Per-function extra expressions to pass to the macro
pub fn_extra: Option<BTreeMap<Ident, Expr>>,
// For diagnostics: spans of the original fields so errors point at the user's input.
pub emit_types_span: Option<Span>,
pub only_span: Option<Span>,
pub fn_extra_span: Option<Span>,
}
impl StructuredInput {
/// Convert the raw `field: value` list into a `StructuredInput`, rejecting unknown
/// field names. All fields except `callback` are optional.
pub fn from_fields(input: Invocation) -> syn::Result<Self> {
let mut map: Vec<_> = input.fields.into_iter().collect();
// Each `expect_field` call removes the named field from `map` if present.
let cb_expr = expect_field(&mut map, "callback")?;
let emit_types_expr = expect_field(&mut map, "emit_types").ok();
let skip_expr = expect_field(&mut map, "skip").ok();
let only_expr = expect_field(&mut map, "only").ok();
let attr_expr = expect_field(&mut map, "attributes").ok();
let extra = expect_field(&mut map, "extra").ok();
let fn_extra = expect_field(&mut map, "fn_extra").ok();
// Anything left over was not a recognized field name.
if !map.is_empty() {
Err(syn::Error::new(
map.first().unwrap().name.span(),
format!("unexpected fields {map:?}"),
))?;
}
let emit_types_span = emit_types_expr.as_ref().map(|expr| expr.span());
let emit_types = match emit_types_expr {
Some(expr) => Parser::parse2(parse_ident_or_array, expr.into_token_stream())?,
None => Vec::new(),
};
let skip = match skip_expr {
Some(expr) => Parser::parse2(parse_ident_array, expr.into_token_stream())?,
None => Vec::new(),
};
let only_span = only_expr.as_ref().map(|expr| expr.span());
let only = match only_expr {
Some(expr) => Some(Parser::parse2(parse_ident_array, expr.into_token_stream())?),
None => None,
};
let attributes = match attr_expr {
Some(expr) => {
// Each element of the array parses into an `AttributeMap` (attrs + name list).
let mut attributes = Vec::new();
let attr_exprs = Parser::parse2(parse_expr_array, expr.into_token_stream())?;
for attr in attr_exprs {
attributes.push(syn::parse2(attr.into_token_stream())?);
}
Some(attributes)
}
None => None,
};
let fn_extra_span = fn_extra.as_ref().map(|expr| expr.span());
let fn_extra = match fn_extra {
Some(expr) => Some(extract_fn_extra_field(expr)?),
None => None,
};
Ok(Self {
callback: expect_ident(cb_expr)?,
emit_types,
skip,
only,
only_span,
attributes,
extra,
fn_extra,
fn_extra_span,
emit_types_span,
})
}
}
/// Convert a `fn_extra: match MACRO_FN_NAME { ... }` expression into a map from function
/// name to the arm body for that function; a wildcard arm is stored under the key `_`.
fn extract_fn_extra_field(expr: Expr) -> syn::Result<BTreeMap<Ident, Expr>> {
let Expr::Match(mexpr) = expr else {
let e = syn::Error::new(expr.span(), "`fn_extra` expects a match expression");
return Err(e);
};
let ExprMatch { attrs, match_token: _, expr, brace_token: _, arms } = mexpr;
expect_empty_attrs(&attrs)?;
// Only the magic `MACRO_FN_NAME` identifier may be matched on.
let match_on = expect_ident(*expr)?;
if match_on != "MACRO_FN_NAME" {
let e = syn::Error::new(match_on.span(), "only allowed to match on `MACRO_FN_NAME`");
return Err(e);
}
let mut res = BTreeMap::new();
for arm in arms {
let Arm { attrs, pat, guard, fat_arrow_token: _, body, comma: _ } = arm;
expect_empty_attrs(&attrs)?;
// A pattern may name several functions (`a | b => ...`); `_` becomes the key `_`.
let keys = match pat {
syn::Pat::Wild(w) => vec![Ident::new("_", w.span())],
_ => Parser::parse2(parse_ident_pat, pat.into_token_stream())?,
};
if let Some(guard) = guard {
let e = syn::Error::new(guard.0.span(), "no guards allowed in this position");
return Err(e);
}
// Each named function gets its own copy of the arm body; duplicates are rejected.
for key in keys {
let inserted = res.insert(key.clone(), *body.clone());
if inserted.is_some() {
let e = syn::Error::new(key.span(), format!("key `{key}` specified twice"));
return Err(e);
}
}
}
Ok(res)
}
fn expect_empty_attrs(attrs: &[Attribute]) -> syn::Result<()> {
if attrs.is_empty() {
return Ok(());
}
let e =
syn::Error::new(attrs.first().unwrap().span(), "no attributes allowed in this position");
Err(e)
}
/// Extract a named field from a map, raising an error if it doesn't exist.
fn expect_field(v: &mut Vec<Mapping>, name: &str) -> syn::Result<Expr> {
let pos = v.iter().position(|v| v.name == name).ok_or_else(|| {
syn::Error::new(Span::call_site(), format!("missing expected field `{name}`"))
})?;
Ok(v.remove(pos).expr)
}
/// Coerce an expression into a simple identifier.
fn expect_ident(expr: Expr) -> syn::Result<Ident> {
    // Round-trip through tokens; parsing fails if the expression is not a lone identifier.
    let tokens = expr.into_token_stream();
    syn::parse2::<Ident>(tokens)
}
/// Parse either a single identifier (`foo`) or an array of identifiers (`[foo, bar, baz]`).
fn parse_ident_or_array(input: ParseStream) -> syn::Result<Vec<Ident>> {
    if input.peek(token::Bracket) {
        parse_ident_array(input)
    } else {
        Ok(vec![input.parse()?])
    }
}
/// Parse an array of expressions.
fn parse_expr_array(input: ParseStream) -> syn::Result<Vec<Expr>> {
    let content;
    bracketed!(content in input);
    let parsed = content.parse_terminated(Expr::parse, Token![,])?;
    Ok(Vec::from_iter(parsed))
}
/// Parse an array of idents, e.g. `[foo, bar, baz]`.
fn parse_ident_array(input: ParseStream) -> syn::Result<Vec<Ident>> {
    let content;
    bracketed!(content in input);
    let parsed = content.parse_terminated(Ident::parse, Token![,])?;
    Ok(Vec::from_iter(parsed))
}
/// Parse a pattern of idents, specifically `(foo | bar | baz)`.
fn parse_ident_pat(input: ParseStream) -> syn::Result<Vec<Ident>> {
    if input.peek2(Token![|]) {
        // A `|`-separated alternation of identifiers.
        let fields = Punctuated::<Ident, Token![|]>::parse_separated_nonempty(input)?;
        Ok(fields.into_iter().collect())
    } else {
        // No `|` follows, so this is a single identifier.
        Ok(vec![input.parse()?])
    }
}
/// A mapping of attributes to identifiers (just a simplified `Expr`).
///
/// Expressed as:
///
/// ```ignore
/// #[meta1]
/// #[meta2]
/// [foo, bar, baz]
/// ```
#[derive(Debug)]
pub struct AttributeMap {
/// The attribute contents (`meta1`, `meta2` above).
pub meta: Vec<Meta>,
/// The function names the attributes apply to (`foo`, `bar`, `baz` above).
pub names: Vec<Ident>,
}
impl Parse for AttributeMap {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        // Outer attributes come first, followed by the bracketed name list.
        let outer = input.call(Attribute::parse_outer)?;
        let meta = outer.into_iter().map(|attr| attr.meta).collect();
        let names = parse_ident_array(input)?;
        Ok(Self { meta, names })
    }
}

View file

@ -0,0 +1,444 @@
/* List of all functions that are shared between `libm-macros` and `libm-test`. */
use std::fmt;
use std::sync::LazyLock;
// Tuple layout: (float type, primary signature, optional alternate signature, function
// names sharing that shape). See the per-entry comments for the signature each group uses.
const ALL_OPERATIONS_NESTED: &[(FloatTy, Signature, Option<Signature>, &[&str])] = &[
(
// `fn(f16) -> f16`
FloatTy::F16,
Signature { args: &[Ty::F16], returns: &[Ty::F16] },
None,
&[
"ceilf16",
"fabsf16",
"floorf16",
"rintf16",
"roundevenf16",
"roundf16",
"sqrtf16",
"truncf16",
],
),
(
// `fn(f32) -> f32`
FloatTy::F32,
Signature { args: &[Ty::F32], returns: &[Ty::F32] },
None,
&[
"acosf",
"acoshf",
"asinf",
"asinhf",
"atanf",
"atanhf",
"cbrtf",
"ceilf",
"cosf",
"coshf",
"erfcf",
"erff",
"exp10f",
"exp2f",
"expf",
"expm1f",
"fabsf",
"floorf",
"j0f",
"j1f",
"lgammaf",
"log10f",
"log1pf",
"log2f",
"logf",
"rintf",
"roundevenf",
"roundf",
"sinf",
"sinhf",
"sqrtf",
"tanf",
"tanhf",
"tgammaf",
"truncf",
"y0f",
"y1f",
],
),
(
// `(f64) -> f64`
FloatTy::F64,
Signature { args: &[Ty::F64], returns: &[Ty::F64] },
None,
&[
"acos",
"acosh",
"asin",
"asinh",
"atan",
"atanh",
"cbrt",
"ceil",
"cos",
"cosh",
"erf",
"erfc",
"exp",
"exp10",
"exp2",
"expm1",
"fabs",
"floor",
"j0",
"j1",
"lgamma",
"log",
"log10",
"log1p",
"log2",
"rint",
"round",
"roundeven",
"sin",
"sinh",
"sqrt",
"tan",
"tanh",
"tgamma",
"trunc",
"y0",
"y1",
],
),
(
// `fn(f128) -> f128`
FloatTy::F128,
Signature { args: &[Ty::F128], returns: &[Ty::F128] },
None,
&[
"ceilf128",
"fabsf128",
"floorf128",
"rintf128",
"roundevenf128",
"roundf128",
"sqrtf128",
"truncf128",
],
),
(
// `(f16, f16) -> f16`
FloatTy::F16,
Signature { args: &[Ty::F16, Ty::F16], returns: &[Ty::F16] },
None,
&[
"copysignf16",
"fdimf16",
"fmaxf16",
"fmaximum_numf16",
"fmaximumf16",
"fminf16",
"fminimum_numf16",
"fminimumf16",
"fmodf16",
],
),
(
// `(f32, f32) -> f32`
FloatTy::F32,
Signature { args: &[Ty::F32, Ty::F32], returns: &[Ty::F32] },
None,
&[
"atan2f",
"copysignf",
"fdimf",
"fmaxf",
"fmaximum_numf",
"fmaximumf",
"fminf",
"fminimum_numf",
"fminimumf",
"fmodf",
"hypotf",
"nextafterf",
"powf",
"remainderf",
],
),
(
// `(f64, f64) -> f64`
FloatTy::F64,
Signature { args: &[Ty::F64, Ty::F64], returns: &[Ty::F64] },
None,
&[
"atan2",
"copysign",
"fdim",
"fmax",
"fmaximum",
"fmaximum_num",
"fmin",
"fminimum",
"fminimum_num",
"fmod",
"hypot",
"nextafter",
"pow",
"remainder",
],
),
(
// `(f128, f128) -> f128`
FloatTy::F128,
Signature { args: &[Ty::F128, Ty::F128], returns: &[Ty::F128] },
None,
&[
"copysignf128",
"fdimf128",
"fmaxf128",
"fmaximum_numf128",
"fmaximumf128",
"fminf128",
"fminimum_numf128",
"fminimumf128",
"fmodf128",
],
),
(
// `(f32, f32, f32) -> f32`
FloatTy::F32,
Signature { args: &[Ty::F32, Ty::F32, Ty::F32], returns: &[Ty::F32] },
None,
&["fmaf"],
),
(
// `(f64, f64, f64) -> f64`
FloatTy::F64,
Signature { args: &[Ty::F64, Ty::F64, Ty::F64], returns: &[Ty::F64] },
None,
&["fma"],
),
(
// `(f128, f128, f128) -> f128`
FloatTy::F128,
Signature { args: &[Ty::F128, Ty::F128, Ty::F128], returns: &[Ty::F128] },
None,
&["fmaf128"],
),
(
// `(f32) -> i32`
FloatTy::F32,
Signature { args: &[Ty::F32], returns: &[Ty::I32] },
None,
&["ilogbf"],
),
(
// `(f64) -> i32`
FloatTy::F64,
Signature { args: &[Ty::F64], returns: &[Ty::I32] },
None,
&["ilogb"],
),
(
// `(i32, f32) -> f32`
FloatTy::F32,
Signature { args: &[Ty::I32, Ty::F32], returns: &[Ty::F32] },
None,
&["jnf", "ynf"],
),
(
// `(i32, f64) -> f64`
FloatTy::F64,
Signature { args: &[Ty::I32, Ty::F64], returns: &[Ty::F64] },
None,
&["jn", "yn"],
),
(
// `(f16, i32) -> f16`
FloatTy::F16,
Signature { args: &[Ty::F16, Ty::I32], returns: &[Ty::F16] },
None,
&["ldexpf16", "scalbnf16"],
),
(
// `(f32, i32) -> f32`
FloatTy::F32,
Signature { args: &[Ty::F32, Ty::I32], returns: &[Ty::F32] },
None,
&["ldexpf", "scalbnf"],
),
(
// `(f64, i32) -> f64`
FloatTy::F64,
Signature { args: &[Ty::F64, Ty::I32], returns: &[Ty::F64] },
None,
&["ldexp", "scalbn"],
),
(
// `(f128, i32) -> f128`
FloatTy::F128,
Signature { args: &[Ty::F128, Ty::I32], returns: &[Ty::F128] },
None,
&["ldexpf128", "scalbnf128"],
),
(
// `(f32, &mut f32) -> f32` as `(f32) -> (f32, f32)`
FloatTy::F32,
Signature { args: &[Ty::F32], returns: &[Ty::F32, Ty::F32] },
Some(Signature { args: &[Ty::F32, Ty::MutF32], returns: &[Ty::F32] }),
&["modff"],
),
(
// `(f64, &mut f64) -> f64` as `(f64) -> (f64, f64)`
FloatTy::F64,
Signature { args: &[Ty::F64], returns: &[Ty::F64, Ty::F64] },
Some(Signature { args: &[Ty::F64, Ty::MutF64], returns: &[Ty::F64] }),
&["modf"],
),
(
// `(f32, &mut c_int) -> f32` as `(f32) -> (f32, i32)`
FloatTy::F32,
Signature { args: &[Ty::F32], returns: &[Ty::F32, Ty::I32] },
Some(Signature { args: &[Ty::F32, Ty::MutCInt], returns: &[Ty::F32] }),
&["frexpf", "lgammaf_r"],
),
(
// `(f64, &mut c_int) -> f64` as `(f64) -> (f64, i32)`
FloatTy::F64,
Signature { args: &[Ty::F64], returns: &[Ty::F64, Ty::I32] },
Some(Signature { args: &[Ty::F64, Ty::MutCInt], returns: &[Ty::F64] }),
&["frexp", "lgamma_r"],
),
(
// `(f32, f32, &mut c_int) -> f32` as `(f32, f32) -> (f32, i32)`
FloatTy::F32,
Signature { args: &[Ty::F32, Ty::F32], returns: &[Ty::F32, Ty::I32] },
Some(Signature { args: &[Ty::F32, Ty::F32, Ty::MutCInt], returns: &[Ty::F32] }),
&["remquof"],
),
(
// `(f64, f64, &mut c_int) -> f64` as `(f64, f64) -> (f64, i32)`
FloatTy::F64,
Signature { args: &[Ty::F64, Ty::F64], returns: &[Ty::F64, Ty::I32] },
Some(Signature { args: &[Ty::F64, Ty::F64, Ty::MutCInt], returns: &[Ty::F64] }),
&["remquo"],
),
(
// `(f32, &mut f32, &mut f32)` as `(f32) -> (f32, f32)`
FloatTy::F32,
Signature { args: &[Ty::F32], returns: &[Ty::F32, Ty::F32] },
Some(Signature { args: &[Ty::F32, Ty::MutF32, Ty::MutF32], returns: &[] }),
&["sincosf"],
),
(
// `(f64, &mut f64, &mut f64)` as `(f64) -> (f64, f64)`
FloatTy::F64,
Signature { args: &[Ty::F64], returns: &[Ty::F64, Ty::F64] },
Some(Signature { args: &[Ty::F64, Ty::MutF64, Ty::MutF64], returns: &[] }),
&["sincos"],
),
];
/// A type used in a function signature.
///
/// Covers the float primitives plus the integer and `&mut` out-parameter
/// types that appear in the C-style math interfaces (e.g. `frexp`, `remquo`).
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum Ty {
    F16,
    F32,
    F64,
    F128,
    /// `i32`, e.g. `ilogb` results or the order argument of `jn`/`yn`.
    I32,
    /// Platform `c_int`, spelled `::core::ffi::c_int` in generated code.
    CInt,
    MutF16,
    MutF32,
    MutF64,
    MutF128,
    MutI32,
    /// `&mut c_int` out-parameter, e.g. `frexp`'s exponent or `remquo`'s quotient.
    MutCInt,
}
/// A subset of [`Ty`] representing only floats.
///
/// Used where an operation needs a single "primary" float type rather than a
/// full signature (see `MathOpInfo::float_ty`).
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum FloatTy {
    F16,
    F32,
    F64,
    F128,
}
impl fmt::Display for Ty {
    /// Write this type as it is spelled in Rust source (e.g. `&mut f32`),
    /// suitable for direct use in generated code.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = match self {
            Ty::F16 => "f16",
            Ty::F32 => "f32",
            Ty::F64 => "f64",
            Ty::F128 => "f128",
            Ty::I32 => "i32",
            // Fully-qualified path so generated code needs no extra imports.
            Ty::CInt => "::core::ffi::c_int",
            Ty::MutF16 => "&mut f16",
            Ty::MutF32 => "&mut f32",
            Ty::MutF64 => "&mut f64",
            Ty::MutF128 => "&mut f128",
            Ty::MutI32 => "&mut i32",
            Ty::MutCInt => "&mut ::core::ffi::c_int",
        };
        f.write_str(s)
    }
}
impl fmt::Display for FloatTy {
    /// Write this float type as its Rust source spelling (`f16` … `f128`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = match self {
            FloatTy::F16 => "f16",
            FloatTy::F32 => "f32",
            FloatTy::F64 => "f64",
            FloatTy::F128 => "f128",
        };
        f.write_str(s)
    }
}
/// Representation of e.g. `(f32, f32) -> f32`
#[derive(Debug, Clone)]
pub struct Signature {
    /// Argument types, in call order.
    pub args: &'static [Ty],
    /// Return types; more than one element represents a tuple return
    /// (e.g. `modf`'s `(f32) -> (f32, f32)` Rust signature).
    pub returns: &'static [Ty],
}
/// Combined information about a function implementation.
#[derive(Debug, Clone)]
pub struct MathOpInfo {
    /// Canonical function name, e.g. `sinf` or `ldexpf128`.
    pub name: &'static str,
    /// The primary float type this operation works with.
    pub float_ty: FloatTy,
    /// Function signature for C implementations
    pub c_sig: Signature,
    /// Function signature for Rust implementations
    pub rust_sig: Signature,
}
/// A flat representation of `ALL_FUNCTIONS`.
///
/// Built lazily by expanding every `(float_ty, rust_sig, c_sig, names)` tuple
/// of `ALL_OPERATIONS_NESTED` into one `MathOpInfo` per name, sorted by name.
pub static ALL_OPERATIONS: LazyLock<Vec<MathOpInfo>> = LazyLock::new(|| {
    let mut ops = Vec::new();

    for (base_fty, rust_sig, c_sig, names) in ALL_OPERATIONS_NESTED {
        // The source lists must be kept sorted so entries are easy to locate;
        // fail loudly (with the expected ordering) if one is not.
        if !names.is_sorted() {
            let mut sorted = (*names).to_owned();
            sorted.sort_unstable();
            panic!("names list is not sorted: {names:?}\nExpected: {sorted:?}");
        }

        for name in *names {
            ops.push(MathOpInfo {
                name,
                float_ty: *base_fty,
                rust_sig: rust_sig.clone(),
                // When no dedicated C signature is given, the C interface
                // matches the Rust one.
                c_sig: c_sig.clone().unwrap_or_else(|| rust_sig.clone()),
            });
        }
    }

    ops.sort_by_key(|item| item.name);
    ops
});

View file

@ -0,0 +1,103 @@
#![feature(f16)]
#![feature(f128)]
// `STATUS_DLL_NOT_FOUND` on i686 MinGW, not worth looking into.
#![cfg(not(all(target_arch = "x86", target_os = "windows", target_env = "gnu")))]
// Callback macro exercising every field `for_each_function!` can emit with
// `emit_types: all`. It generates one module per function whose type aliases
// force each emitted type to be valid Rust.
macro_rules! basic {
    (
        fn_name: $fn_name:ident,
        FTy: $FTy:ty,
        CFn: $CFn:ty,
        CArgs: $CArgs:ty,
        CRet: $CRet:ty,
        RustFn: $RustFn:ty,
        RustArgs: $RustArgs:ty,
        RustRet: $RustRet:ty,
        attrs: [$($attr:meta),*],
        extra: [$($extra_tt:tt)*],
        fn_extra: $fn_extra:expr,
    ) => {
        $(#[$attr])*
        #[allow(dead_code)]
        pub mod $fn_name {
            // Aliasing the emitted types makes invalid expansions fail to compile.
            type FTy= $FTy;
            type CFnTy<'a> = $CFn;
            type RustFnTy = $RustFn;
            type RustArgsTy = $RustArgs;
            type RustRetTy = $RustRet;
            // `extra` tokens are expected to form a `&[&str]` literal here.
            const A: &[&str] = &[$($extra_tt)*];
            // `fn_extra` must expand to something callable with one `f32`.
            fn foo(a: f32) -> f32 {
                $fn_extra(a)
            }
        }
    };
}
mod test_basic {
    // Expand `basic` for every function, skipping `sin`/`cos`, attaching
    // extra attributes only to `sinf`/`cosf`, and selecting a per-function
    // `fn_extra` via the `MACRO_FN_NAME` placeholder.
    libm_macros::for_each_function! {
        callback: basic,
        emit_types: all,
        skip: [sin, cos],
        attributes: [
            // just some random attributes
            #[allow(clippy::pedantic)]
            #[allow(dead_code)]
            [sinf, cosf]
        ],
        extra: ["foo", "bar"],
        fn_extra: match MACRO_FN_NAME {
            sin => |x| x + 2.0,
            cos | cosf => |x: f32| x.MACRO_FN_NAME_NORMALIZED(),
            _ => |_x| 100.0
        }
    }
}
// Minimal callback: when the invocation passes no `extra`/`fn_extra`, only
// `fn_name` and `attrs` are provided.
macro_rules! basic_no_extra {
    (
        fn_name: $fn_name:ident,
        attrs: [$($attr:meta),*],
    ) => {
        $(#[$attr])*
        mod $fn_name {}
    };
}
mod test_basic_no_extra {
    // Test with no extra, no skip, and no attributes
    libm_macros::for_each_function! {
        callback: basic_no_extra,
    }
}
mod test_only {
    // Test that `only:` restricts expansion to just the listed functions.
    libm_macros::for_each_function! {
        callback: basic_no_extra,
        only: [sin, sinf],
    }
}
// Callback that consumes only the `RustFn`/`RustArgs` types, for use with an
// `emit_types` list (a subset rather than `all`).
macro_rules! specified_types {
    (
        fn_name: $fn_name:ident,
        RustFn: $RustFn:ty,
        RustArgs: $RustArgs:ty,
        attrs: [$($attr:meta),*],
    ) => {
        $(#[$attr])*
        #[allow(dead_code)]
        mod $fn_name {
            // Aliases verify the requested types expand to valid Rust.
            type RustFnTy = $RustFn;
            type RustArgsTy = $RustArgs;
        }
    };
}
mod test_emit_types {
    // Test that we can specify a couple types to emit
    libm_macros::for_each_function! {
        callback: specified_types,
        emit_types: [RustFn, RustArgs],
    }
}

View file

@ -0,0 +1,38 @@
// Variants are generated by the proc macro (one per function, judging by the
// `Sin`/`Sinf` variants used in the tests below); `BaseName` names the enum
// returned by the generated `base_name` method.
#[libm_macros::function_enum(BaseName)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum Identifier {}
// Variants generated by the proc macro: one per base name shared by a family
// of functions (e.g. `Sin` covers `sin` and `sinf`).
#[libm_macros::base_name_enum]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum BaseName {}
#[test]
fn as_str() {
    // Variants map back to the original snake-case function names.
    assert_eq!(Identifier::Sin.as_str(), "sin");
    assert_eq!(Identifier::Sinf.as_str(), "sinf");
}
#[test]
fn from_str() {
    // The string -> variant conversion is the inverse of `as_str`.
    assert_eq!(Identifier::from_str("sin").unwrap(), Identifier::Sin);
    assert_eq!(Identifier::from_str("sinf").unwrap(), Identifier::Sinf);
}
#[test]
fn basename() {
    // Both the f64 and f32 variants share the same base name.
    assert_eq!(Identifier::Sin.base_name(), BaseName::Sin);
    assert_eq!(Identifier::Sinf.base_name(), BaseName::Sin);
}
#[test]
fn math_op() {
    // `math_op` exposes per-function metadata; check the float types.
    assert_eq!(Identifier::Sin.math_op().float_ty, FloatTy::F64);
    assert_eq!(Identifier::Sinf.math_op().float_ty, FloatTy::F32);
}
// Replicate the structure that we have in `libm-test`: include the macro
// crate's shared source directly so types like `FloatTy` are available here.
mod op {
    include!("../../libm-macros/src/shared.rs");
}

use op::FloatTy;

View file

@ -0,0 +1,70 @@
[package]
name = "libm-test"
version = "0.1.0"
edition = "2024"
publish = false
[features]
default = ["build-mpfr", "build-musl", "unstable-float"]
# Propagated from libm because this affects which functions we test.
unstable-float = ["libm/unstable-float", "rug?/nightly-float"]
# Test against correct results computed with the MPFR multiple-precision
# library (via the `rug` bindings).
build-mpfr = ["dep:rug", "dep:gmp-mpfr-sys"]
# Build our own musl for testing and benchmarks
build-musl = ["dep:musl-math-sys"]
# Enable report generation without bringing in more dependencies by default
benchmarking-reports = ["criterion/plotters", "criterion/html_reports"]
# Enable icount benchmarks (requires iai-callgrind and valgrind)
icount = ["dep:iai-callgrind"]
# Run with a reduced set of benchmarks, such as for CI
short-benchmarks = []
[dependencies]
anyhow = "1.0.97"
# This is not directly used but is required so we can enable `gmp-mpfr-sys/force-cross`.
gmp-mpfr-sys = { version = "1.6.4", optional = true, default-features = false }
iai-callgrind = { version = "0.14.0", optional = true }
indicatif = { version = "0.17.11", default-features = false }
libm = { path = "../../libm", features = ["unstable-public-internals"] }
libm-macros = { path = "../libm-macros" }
musl-math-sys = { path = "../musl-math-sys", optional = true }
paste = "1.0.15"
rand = "0.9.0"
rand_chacha = "0.9.0"
rayon = "1.10.0"
rug = { version = "1.27.0", optional = true, default-features = false, features = ["float", "integer", "std"] }
[build-dependencies]
rand = { version = "0.9.0", optional = true }
[dev-dependencies]
criterion = { version = "0.5.1", default-features = false, features = ["cargo_bench_support"] }
libtest-mimic = "0.8.1"
[[bench]]
name = "icount"
harness = false
required-features = ["icount"]
[[bench]]
name = "random"
harness = false
[[test]]
# No harness so that we can skip tests at runtime based on env. Prefixed with
# `z` so these tests get run last.
name = "z_extensive"
harness = false
[lints.rust]
# Values from the shared config.rs used by `libm` but not the test crate
unexpected_cfgs = { level = "warn", check-cfg = [
'cfg(feature, values("arch", "force-soft-floats", "unstable-intrinsics"))',
] }

View file

@ -0,0 +1,316 @@
//! Benchmarks that use `iai-cachegrind` to be reasonably CI-stable.
use std::hint::black_box;
use iai_callgrind::{library_benchmark, library_benchmark_group, main};
use libm::support::{HInt, u256};
use libm_test::generate::spaced;
use libm_test::{CheckBasis, CheckCtx, GeneratorKind, MathOp, OpRustArgs, TupleCall, op};
/// Number of inputs to generate per benchmarked operation.
const BENCH_ITER_ITEMS: u64 = 500;

// For each libm function, emit: a setup function producing inputs from the
// logspace generator, an icount benchmark consuming them, and a benchmark
// group registering it.
macro_rules! icount_benches {
    (
        fn_name: $fn_name:ident,
        attrs: [$($_attr:meta),*],
    ) => {
        paste::paste! {
            // Construct benchmark inputs from the logspace generator.
            fn [< setup_ $fn_name >]() -> Vec<OpRustArgs<op::$fn_name::Routine>> {
                type Op = op::$fn_name::Routine;
                let mut ctx = CheckCtx::new(
                    Op::IDENTIFIER,
                    CheckBasis::None,
                    GeneratorKind::QuickSpaced
                );
                ctx.override_iterations(BENCH_ITER_ITEMS);
                let ret = spaced::get_test_cases::<Op>(&ctx).0.collect::<Vec<_>>();
                println!("operation {}, {} steps", Op::NAME, ret.len());
                ret
            }

            // Run benchmarks with the above inputs.
            #[library_benchmark]
            #[bench::logspace([< setup_ $fn_name >]())]
            fn [< icount_bench_ $fn_name >](cases: Vec<OpRustArgs<op::$fn_name::Routine>>) {
                type Op = op::$fn_name::Routine;
                // Black-box the function pointer so the call isn't inlined away.
                let f = black_box(Op::ROUTINE);
                for input in cases.iter().copied() {
                    input.call(f);
                }
            }

            library_benchmark_group!(
                name = [< icount_bench_ $fn_name _group >];
                benchmarks = [< icount_bench_ $fn_name >]
            );
        }
    };
}
// Expand `icount_benches` once for every function libm provides.
libm_macros::for_each_function! {
    callback: icount_benches,
}
/// Build a grid of `(x, y)` pairs spanning the full `u128` range, stepping
/// each coordinate by roughly 1/300th of the range.
///
/// Produces 301 `x` values (0 through ~`u128::MAX`) with 300 `y` values each.
fn setup_u128_mul() -> Vec<(u128, u128)> {
    let step = u128::MAX / 300;
    let mut v = Vec::new();

    let mut x = 0u128;
    loop {
        // Reset the inner coordinate for every new `x`. Without this reset,
        // `y` stays saturated near `u128::MAX` after the first outer pass, so
        // the inner loop overflows immediately and only the `x == 0` row is
        // ever produced — defeating the range coverage this grid is for.
        let mut y = 0u128;
        'inner: loop {
            match y.checked_add(step) {
                Some(new) => y = new,
                None => break 'inner,
            }

            v.push((x, y))
        }

        match x.checked_add(step) {
            Some(new) => x = new,
            None => break,
        }
    }

    v
}
/// Build `(u256, u256)` pairs for addition benchmarks by squaring the
/// `u128` grid values.
fn setup_u256_add() -> Vec<(u256, u256)> {
    let mut v = Vec::new();
    for (x, y) in setup_u128_mul() {
        // square the u128 inputs to cover most of the u256 range
        v.push((x.widen_mul(x), y.widen_mul(y)));
    }
    // The maximum isn't reachable via `u128::MAX^2`; add it explicitly.
    v.push((u256::MAX, u256::MAX));
    v
}
/// Build `(u256, shift)` pairs covering every shift amount `0..256` for each
/// squared `u128` grid value.
fn setup_u256_shift() -> Vec<(u256, u32)> {
    let mut cases = Vec::new();
    for (x, _) in setup_u128_mul() {
        let wide = x.widen_mul(x);
        // Pair this value with every possible shift amount.
        cases.extend((0u32..256).map(|shift| (wide, shift)));
    }
    cases
}
// Benchmark `u128` widening multiplication over the precomputed input grid.
#[library_benchmark]
#[bench::linspace(setup_u128_mul())]
fn icount_bench_u128_widen_mul(cases: Vec<(u128, u128)>) {
    for (x, y) in cases.iter().copied() {
        black_box(black_box(x).zero_widen_mul(black_box(y)));
    }
}

library_benchmark_group!(
    name = icount_bench_u128_widen_mul_group;
    benchmarks = icount_bench_u128_widen_mul
);
// Benchmark `u256` addition over the precomputed input pairs.
#[library_benchmark]
#[bench::linspace(setup_u256_add())]
fn icount_bench_u256_add(cases: Vec<(u256, u256)>) {
    for (x, y) in cases.iter().copied() {
        black_box(black_box(x) + black_box(y));
    }
}

library_benchmark_group!(
    name = icount_bench_u256_add_group;
    benchmarks = icount_bench_u256_add
);
// Benchmark `u256` right shift for every shift amount in the input set.
#[library_benchmark]
#[bench::linspace(setup_u256_shift())]
fn icount_bench_u256_shr(cases: Vec<(u256, u32)>) {
    for (x, y) in cases.iter().copied() {
        black_box(black_box(x) >> black_box(y));
    }
}

library_benchmark_group!(
    name = icount_bench_u256_shr_group;
    benchmarks = icount_bench_u256_shr
);
// Register every benchmark group with the iai-callgrind runner.
// NOTE(review): the `verify-sorted-*`/`verify-apilist-*` markers suggest an
// external check keeps this list sorted — keep new entries in alphabetical
// order between them.
main!(
    library_benchmark_groups =
    // u256-related benchmarks
    icount_bench_u128_widen_mul_group,
    icount_bench_u256_add_group,
    icount_bench_u256_shr_group,
    // verify-apilist-start
    // verify-sorted-start
    icount_bench_acos_group,
    icount_bench_acosf_group,
    icount_bench_acosh_group,
    icount_bench_acoshf_group,
    icount_bench_asin_group,
    icount_bench_asinf_group,
    icount_bench_asinh_group,
    icount_bench_asinhf_group,
    icount_bench_atan2_group,
    icount_bench_atan2f_group,
    icount_bench_atan_group,
    icount_bench_atanf_group,
    icount_bench_atanh_group,
    icount_bench_atanhf_group,
    icount_bench_cbrt_group,
    icount_bench_cbrtf_group,
    icount_bench_ceil_group,
    icount_bench_ceilf128_group,
    icount_bench_ceilf16_group,
    icount_bench_ceilf_group,
    icount_bench_copysign_group,
    icount_bench_copysignf128_group,
    icount_bench_copysignf16_group,
    icount_bench_copysignf_group,
    icount_bench_cos_group,
    icount_bench_cosf_group,
    icount_bench_cosh_group,
    icount_bench_coshf_group,
    icount_bench_erf_group,
    icount_bench_erfc_group,
    icount_bench_erfcf_group,
    icount_bench_erff_group,
    icount_bench_exp10_group,
    icount_bench_exp10f_group,
    icount_bench_exp2_group,
    icount_bench_exp2f_group,
    icount_bench_exp_group,
    icount_bench_expf_group,
    icount_bench_expm1_group,
    icount_bench_expm1f_group,
    icount_bench_fabs_group,
    icount_bench_fabsf128_group,
    icount_bench_fabsf16_group,
    icount_bench_fabsf_group,
    icount_bench_fdim_group,
    icount_bench_fdimf128_group,
    icount_bench_fdimf16_group,
    icount_bench_fdimf_group,
    icount_bench_floor_group,
    icount_bench_floorf128_group,
    icount_bench_floorf16_group,
    icount_bench_floorf_group,
    icount_bench_fma_group,
    icount_bench_fmaf128_group,
    icount_bench_fmaf_group,
    icount_bench_fmax_group,
    icount_bench_fmaxf128_group,
    icount_bench_fmaxf16_group,
    icount_bench_fmaxf_group,
    icount_bench_fmaximum_group,
    icount_bench_fmaximum_num_group,
    icount_bench_fmaximum_numf128_group,
    icount_bench_fmaximum_numf16_group,
    icount_bench_fmaximum_numf_group,
    icount_bench_fmaximumf128_group,
    icount_bench_fmaximumf16_group,
    icount_bench_fmaximumf_group,
    icount_bench_fmin_group,
    icount_bench_fminf128_group,
    icount_bench_fminf16_group,
    icount_bench_fminf_group,
    icount_bench_fminimum_group,
    icount_bench_fminimum_num_group,
    icount_bench_fminimum_numf128_group,
    icount_bench_fminimum_numf16_group,
    icount_bench_fminimum_numf_group,
    icount_bench_fminimumf128_group,
    icount_bench_fminimumf16_group,
    icount_bench_fminimumf_group,
    icount_bench_fmod_group,
    icount_bench_fmodf128_group,
    icount_bench_fmodf16_group,
    icount_bench_fmodf_group,
    icount_bench_frexp_group,
    icount_bench_frexpf_group,
    icount_bench_hypot_group,
    icount_bench_hypotf_group,
    icount_bench_ilogb_group,
    icount_bench_ilogbf_group,
    icount_bench_j0_group,
    icount_bench_j0f_group,
    icount_bench_j1_group,
    icount_bench_j1f_group,
    icount_bench_jn_group,
    icount_bench_jnf_group,
    icount_bench_ldexp_group,
    icount_bench_ldexpf128_group,
    icount_bench_ldexpf16_group,
    icount_bench_ldexpf_group,
    icount_bench_lgamma_group,
    icount_bench_lgamma_r_group,
    icount_bench_lgammaf_group,
    icount_bench_lgammaf_r_group,
    icount_bench_log10_group,
    icount_bench_log10f_group,
    icount_bench_log1p_group,
    icount_bench_log1pf_group,
    icount_bench_log2_group,
    icount_bench_log2f_group,
    icount_bench_log_group,
    icount_bench_logf_group,
    icount_bench_modf_group,
    icount_bench_modff_group,
    icount_bench_nextafter_group,
    icount_bench_nextafterf_group,
    icount_bench_pow_group,
    icount_bench_powf_group,
    icount_bench_remainder_group,
    icount_bench_remainderf_group,
    icount_bench_remquo_group,
    icount_bench_remquof_group,
    icount_bench_rint_group,
    icount_bench_rintf128_group,
    icount_bench_rintf16_group,
    icount_bench_rintf_group,
    icount_bench_round_group,
    icount_bench_roundeven_group,
    icount_bench_roundevenf128_group,
    icount_bench_roundevenf16_group,
    icount_bench_roundevenf_group,
    icount_bench_roundf128_group,
    icount_bench_roundf16_group,
    icount_bench_roundf_group,
    icount_bench_scalbn_group,
    icount_bench_scalbnf128_group,
    icount_bench_scalbnf16_group,
    icount_bench_scalbnf_group,
    icount_bench_sin_group,
    icount_bench_sincos_group,
    icount_bench_sincosf_group,
    icount_bench_sinf_group,
    icount_bench_sinh_group,
    icount_bench_sinhf_group,
    icount_bench_sqrt_group,
    icount_bench_sqrtf128_group,
    icount_bench_sqrtf16_group,
    icount_bench_sqrtf_group,
    icount_bench_tan_group,
    icount_bench_tanf_group,
    icount_bench_tanh_group,
    icount_bench_tanhf_group,
    icount_bench_tgamma_group,
    icount_bench_tgammaf_group,
    icount_bench_trunc_group,
    icount_bench_truncf128_group,
    icount_bench_truncf16_group,
    icount_bench_truncf_group,
    icount_bench_y0_group,
    icount_bench_y0f_group,
    icount_bench_y1_group,
    icount_bench_y1f_group,
    icount_bench_yn_group,
    icount_bench_ynf_group,
    // verify-sorted-end
    // verify-apilist-end
);

View file

@ -0,0 +1,207 @@
use std::hint::black_box;
use std::time::Duration;
use criterion::{Criterion, criterion_main};
use libm_test::generate::random;
use libm_test::generate::random::RandomInput;
use libm_test::{CheckBasis, CheckCtx, GeneratorKind, MathOp, TupleCall};
/// Benchmark with this many items to get a variety
const BENCH_ITER_ITEMS: usize = if cfg!(feature = "short-benchmarks") { 50 } else { 500 };

/// Extra parameters we only care about if we are benchmarking against musl.
#[allow(dead_code)]
struct MuslExtra<F> {
    /// Reference implementation from musl, when one exists for this function.
    musl_fn: Option<F>,
    /// Whether to skip the musl result sanity check on i586 targets.
    skip_on_i586: bool,
}
// Generate one `musl_bench_*` function per libm routine. The invocation's
// `fn_extra` supplies a `(skip_on_i586, musl_fn)` tuple for each function.
macro_rules! musl_rand_benches {
    (
        fn_name: $fn_name:ident,
        attrs: [$($attr:meta),*],
        fn_extra: ($skip_on_i586:expr, $musl_fn:expr),
    ) => {
        paste::paste! {
            $(#[$attr])*
            fn [< musl_bench_ $fn_name >](c: &mut Criterion) {
                type Op = libm_test::op::$fn_name::Routine;

                // `$musl_fn` may reference `musl_math_sys`, which only exists
                // when the `build-musl` feature is enabled.
                #[cfg(feature = "build-musl")]
                let musl_extra = MuslExtra::<libm_test::OpCFn<Op>> {
                    musl_fn: $musl_fn,
                    skip_on_i586: $skip_on_i586,
                };

                #[cfg(not(feature = "build-musl"))]
                let musl_extra = MuslExtra {
                    musl_fn: None,
                    skip_on_i586: $skip_on_i586,
                };

                bench_one::<Op>(c, musl_extra);
            }
        }
    };
}
/// Benchmark a single operation: always our implementation, plus the musl
/// implementation when one is provided and the `build-musl` feature is on.
fn bench_one<Op>(c: &mut Criterion, musl_extra: MuslExtra<Op::CFn>)
where
    Op: MathOp,
    Op::RustArgs: RandomInput,
{
    let name = Op::NAME;

    let ctx = CheckCtx::new(Op::IDENTIFIER, CheckBasis::Musl, GeneratorKind::Random);
    let benchvec: Vec<_> =
        random::get_test_cases::<Op::RustArgs>(&ctx).0.take(BENCH_ITER_ITEMS).collect();

    // Perform a sanity check that we are benchmarking the same thing
    // Don't test against musl if it is not available
    #[cfg(feature = "build-musl")]
    for input in benchvec.iter().copied() {
        use anyhow::Context;
        use libm_test::CheckOutput;

        if cfg!(x86_no_sse) && musl_extra.skip_on_i586 {
            break;
        }

        let Some(musl_fn) = musl_extra.musl_fn else {
            continue;
        };
        let musl_res = input.call(musl_fn);
        let crate_res = input.call(Op::ROUTINE);

        crate_res.validate(musl_res, input, &ctx).context(name).unwrap();
    }

    #[cfg(not(feature = "build-musl"))]
    let _ = musl_extra; // silence unused warnings

    /* Option pointers are black boxed to avoid inlining in the benchmark loop */
    let mut group = c.benchmark_group(name);
    group.bench_function("crate", |b| {
        b.iter(|| {
            let f = black_box(Op::ROUTINE);
            for input in benchvec.iter().copied() {
                input.call(f);
            }
        })
    });

    // Don't test against musl if it is not available
    #[cfg(feature = "build-musl")]
    {
        if let Some(musl_fn) = musl_extra.musl_fn {
            group.bench_function("musl", |b| {
                b.iter(|| {
                    let f = black_box(musl_fn);
                    for input in benchvec.iter().copied() {
                        input.call(f);
                    }
                })
            });
        }
    }
}
// Expand `musl_rand_benches` for every function, selecting per function
// whether to skip the i586 sanity check and whether a musl reference exists.
libm_macros::for_each_function! {
    callback: musl_rand_benches,
    skip: [],
    fn_extra: match MACRO_FN_NAME {
        // We pass a tuple of `(skip_on_i586, musl_fn)`
        // FIXME(correctness): exp functions have the wrong result on i586
        exp10 | exp10f | exp2 | exp2f => (true, Some(musl_math_sys::MACRO_FN_NAME)),
        // Musl does not provide `f16` and `f128` functions
        ceilf128
        | ceilf16
        | copysignf128
        | copysignf16
        | fabsf128
        | fabsf16
        | fdimf128
        | fdimf16
        | floorf128
        | floorf16
        | fmaf128
        | fmaxf128
        | fmaxf16
        | fmaximum
        | fmaximum_num
        | fmaximum_numf
        | fmaximum_numf128
        | fmaximum_numf16
        | fmaximumf
        | fmaximumf128
        | fmaximumf16
        | fminf128
        | fminf16
        | fminimum
        | fminimum_num
        | fminimum_numf
        | fminimum_numf128
        | fminimum_numf16
        | fminimumf
        | fminimumf128
        | fminimumf16
        | fmodf128
        | fmodf16
        | ldexpf128
        | ldexpf16
        | rintf128
        | rintf16
        | roundeven
        | roundevenf
        | roundevenf128
        | roundevenf16
        | roundf128
        | roundf16
        | scalbnf128
        | scalbnf16
        | sqrtf128
        | sqrtf16
        | truncf128
        | truncf16 => (false, None),
        // By default we never skip (false) and always have a musl function available
        _ => (false, Some(musl_math_sys::MACRO_FN_NAME))
    }
}
// Invoke each generated `musl_bench_*` function, passing the shared
// `Criterion` instance provided through `extra`.
macro_rules! run_callback {
    (
        fn_name: $fn_name:ident,
        attrs: [$($attr:meta),*],
        extra: [$criterion:ident],
    ) => {
        paste::paste! {
            $(#[$attr])*
            [< musl_bench_ $fn_name >](&mut $criterion)
        }
    };
}
/// Entry point: configure Criterion and run every musl-comparison benchmark.
pub fn musl_random() {
    let mut criterion = Criterion::default();

    // For CI, run a short 200ms warmup and 600ms measurement. This makes
    // benchmarks complete in about the same time as other tests.
    if cfg!(feature = "short-benchmarks") {
        criterion = criterion
            .warm_up_time(Duration::from_millis(200))
            .measurement_time(Duration::from_millis(600));
    }

    // Command-line arguments (e.g. filters) override the defaults above.
    criterion = criterion.configure_from_args();

    libm_macros::for_each_function! {
        callback: run_callback,
        extra: [criterion],
    };
}

criterion_main!(musl_random);

View file

@ -0,0 +1,9 @@
// Reuse the configuration logic from the `libm` crate's build support file.
#[path = "../../libm/configure.rs"]
mod configure;
use configure::Config;

fn main() {
    // Rebuild when the shared configuration source changes.
    println!("cargo:rerun-if-changed=../../libm/configure.rs");
    let cfg = Config::from_env();
    configure::emit_test_config(&cfg);
}

View file

@ -0,0 +1,103 @@
//! Program to write all inputs from a generator to a file, then invoke a Julia script to plot
//! them. Output is in `target/plots`.
//!
//! Requires Julia with the `CairoMakie` dependency.
//!
//! Note that running in release mode by default generates a _lot_ more datapoints, which
//! causes plotting to be extremely slow (some simplification to be done in the script).
use std::fmt::Write as _;
use std::io::{BufWriter, Write};
use std::path::Path;
use std::process::Command;
use std::{env, fs};
use libm_test::generate::spaced::SpacedInput;
use libm_test::generate::{edge_cases, spaced};
use libm_test::{CheckBasis, CheckCtx, GeneratorKind, MathOp, op};
const JL_PLOT: &str = "examples/plot_file.jl";
/// Generate input files for a few operators, write the plot config, then
/// invoke the Julia plotting script on it.
fn main() {
    let manifest_env = env::var("CARGO_MANIFEST_DIR").unwrap();
    let manifest_dir = Path::new(&manifest_env);
    let out_dir = manifest_dir.join("../../target/plots");
    if !out_dir.exists() {
        fs::create_dir(&out_dir).unwrap();
    }

    let jl_script = manifest_dir.join(JL_PLOT);
    // Start the TOML config with the output directory; `[[input]]` entries
    // are appended by `plot_one_generator`.
    let mut config = format!(r#"out_dir = "{}""#, out_dir.display());
    config.write_str("\n\n").unwrap();

    // Plot a few domains with some functions that use them.
    plot_one_operator::<op::sqrtf::Routine>(&out_dir, &mut config);
    plot_one_operator::<op::cosf::Routine>(&out_dir, &mut config);
    plot_one_operator::<op::cbrtf::Routine>(&out_dir, &mut config);

    let config_path = out_dir.join("config.toml");
    fs::write(&config_path, config).unwrap();

    // The script expects a path to `config.toml` to be passed as its only argument
    let mut cmd = Command::new("julia");
    if cfg!(optimizations_enabled) {
        cmd.arg("-O3");
    }
    cmd.arg(jl_script).arg(config_path);
    println!("launching script... {cmd:?}");
    cmd.status().unwrap();
}
/// Run multiple generators for a single operator.
///
/// Produces one input file per generator (logspace and edge cases) and
/// appends the corresponding config entries.
fn plot_one_operator<Op>(out_dir: &Path, config: &mut String)
where
    Op: MathOp<FTy = f32, RustArgs = (f32,)>,
    Op::RustArgs: SpacedInput<Op>,
{
    let mut ctx = CheckCtx::new(Op::IDENTIFIER, CheckBasis::Mpfr, GeneratorKind::QuickSpaced);
    plot_one_generator(out_dir, &ctx, "logspace", config, spaced::get_test_cases::<Op>(&ctx).0);
    // Reuse the same context with the edge-case generator.
    ctx.gen_kind = GeneratorKind::EdgeCases;
    plot_one_generator(
        out_dir,
        &ctx,
        "edge_cases",
        config,
        edge_cases::get_test_cases::<Op>(&ctx).0,
    );
}
/// Plot the output of a single generator.
///
/// Writes each generated input (one per line, scientific notation) to a text
/// file and appends an `[[input]]` entry to the TOML config the Julia script
/// consumes.
fn plot_one_generator(
    out_dir: &Path,
    ctx: &CheckCtx,
    gen_name: &str,
    config: &mut String,
    generator: impl Iterator<Item = (f32,)>,
) {
    let fn_name = ctx.base_name_str;
    let text_file = out_dir.join(format!("input-{fn_name}-{gen_name}.txt"));

    let f = fs::File::create(&text_file).unwrap();
    let mut w = BufWriter::new(f);
    let mut count = 0u64;

    for input in generator {
        writeln!(w, "{:e}", input.0).unwrap();
        count += 1;
    }

    w.flush().unwrap();
    println!("generated {count} inputs for {fn_name}-{gen_name}");

    writeln!(
        config,
        r#"[[input]]
function = "{fn_name}"
generator = "{gen_name}"
input_file = "{}"
"#,
        text_file.to_str().unwrap()
    )
    .unwrap()
}

View file

@ -0,0 +1,171 @@
"A quick script for plotting a list of floats.
Takes a path to a TOML file (Julia has builtin TOML support but not JSON) which
specifies a list of source files to plot. Plots are done with both a linear and
a log scale.
Requires [Makie] (specifically CairoMakie) for plotting.
[Makie]: https://docs.makie.org/stable/
"
using CairoMakie
using TOML
# Entry point: parse the TOML config passed as the first CLI argument and
# produce plots for each configured input file.
function main()::Nothing
    CairoMakie.activate!(px_per_unit = 10)
    config_path = ARGS[1]
    cfg = Dict()
    open(config_path, "r") do f
        cfg = TOML.parse(f)
    end

    out_dir = cfg["out_dir"]

    # Each `[[input]]` entry names a function, a generator, and a data file.
    for input in cfg["input"]
        fn_name = input["function"]
        gen_name = input["generator"]
        input_file = input["input_file"]
        plot_one(input_file, out_dir, fn_name, gen_name)
    end
end
"Read inputs from a file, create both linear and log plots for one function"
function plot_one(
input_file::String,
out_dir::String,
fn_name::String,
gen_name::String,
)::Nothing
fig = Figure()
lin_out_file = joinpath(out_dir, "plot-$fn_name-$gen_name.png")
log_out_file = joinpath(out_dir, "plot-$fn_name-$gen_name-log.png")
# Map string function names to callable functions
if fn_name == "cos"
orig_func = cos
xlims = (-6.0, 6.0)
xlims_log = (-pi * 10, pi * 10)
elseif fn_name == "cbrt"
orig_func = cbrt
xlims = (-2.0, 2.0)
xlims_log = (-1000.0, 1000.0)
elseif fn_name == "sqrt"
orig_func = sqrt
xlims = (-1.1, 6.0)
xlims_log = (-1.1, 5000.0)
else
println("unrecognized function name `$fn_name`; update plot_file.jl")
exit(1)
end
# Edge cases don't do much beyond +/-1, except for infinity.
if gen_name == "edge_cases"
xlims = (-1.1, 1.1)
xlims_log = (-1.1, 1.1)
end
# Turn domain errors into NaN
func(x) = map_or(x, orig_func, NaN)
# Parse a series of X values produced by the generator
inputs = readlines(input_file)
gen_x = map((v) -> parse(Float32, v), inputs)
do_plot(
fig,
gen_x,
func,
xlims[1],
xlims[2],
"$fn_name $gen_name (linear scale)",
lin_out_file,
false,
)
do_plot(
fig,
gen_x,
func,
xlims_log[1],
xlims_log[2],
"$fn_name $gen_name (log scale)",
log_out_file,
true,
)
end
"Create a single plot"
function do_plot(
fig::Figure,
gen_x::Vector{F},
func::Function,
xmin::AbstractFloat,
xmax::AbstractFloat,
title::String,
out_file::String,
logscale::Bool,
)::Nothing where {F<:AbstractFloat}
println("plotting $title")
# `gen_x` is the values the generator produces. `actual_x` is for plotting a
# continuous function.
input_min = xmin - 1.0
input_max = xmax + 1.0
gen_x = filter((v) -> v >= input_min && v <= input_max, gen_x)
markersize = length(gen_x) < 10_000 ? 6.0 : 4.0
steps = 10_000
if logscale
r = LinRange(symlog10(input_min), symlog10(input_max), steps)
actual_x = sympow10.(r)
xscale = Makie.pseudolog10
else
actual_x = LinRange(input_min, input_max, steps)
xscale = identity
end
gen_y = @. func(gen_x)
actual_y = @. func(actual_x)
ax = Axis(fig[1, 1], xscale = xscale, title = title)
lines!(
ax,
actual_x,
actual_y,
color = (:lightblue, 0.6),
linewidth = 6.0,
label = "true function",
)
scatter!(
ax,
gen_x,
gen_y,
color = (:darkblue, 0.9),
markersize = markersize,
label = "checked inputs",
)
axislegend(ax, position = :rb, framevisible = false)
save(out_file, fig)
delete!(ax)
end
"Apply a function, returning the default if there is a domain error"
function map_or(input::AbstractFloat, f::Function, default::Any)::Union{AbstractFloat,Any}
try
return f(input)
catch
return default
end
end
# Operations for logarithms that are symmetric about 0.
# `C` sets the scale of the near-linear region around zero: for |x| much
# smaller than 10^C, symlog10 is approximately linear in x; larger magnitudes
# compress logarithmically. `sympow10` is the corresponding inverse mapping.
C = 10
symlog10(x::Number) = sign(x) * (log10(1 + abs(x) / (10^C)))
sympow10(x::Number) = (10^C) * (10^x - 1)

main()

View file

@ -0,0 +1,265 @@
//! Traits and operations related to bounds of a function.
use std::fmt;
use std::ops::Bound;
use libm::support::Int;
use crate::{BaseName, Float, FloatExt, Identifier};
/// Representation of a single dimension of a function's domain.
#[derive(Clone, Debug)]
pub struct Domain<T> {
    /// Start of the region for which a function is defined (ignoring poles).
    pub start: Bound<T>,
    /// End of the region for which a function is defined (ignoring poles).
    pub end: Bound<T>,
    /// Additional points to check closer around. These can be e.g. undefined asymptotes or
    /// inflection points.
    pub check_points: Option<fn() -> BoxIter<T>>,
}

/// Boxed iterator used for lazily produced check points.
type BoxIter<T> = Box<dyn Iterator<Item = T>>;
impl<F: FloatExt> Domain<F> {
    /// The start of this domain, saturating at negative infinity.
    pub fn range_start(&self) -> F {
        match self.start {
            Bound::Included(v) => v,
            // An excluded bound starts at the next representable float up.
            Bound::Excluded(v) => v.next_up(),
            Bound::Unbounded => F::NEG_INFINITY,
        }
    }

    /// The end of this domain, saturating at infinity.
    pub fn range_end(&self) -> F {
        match self.end {
            Bound::Included(v) => v,
            // An excluded bound ends at the next representable float down.
            Bound::Excluded(v) => v.next_down(),
            Bound::Unbounded => F::INFINITY,
        }
    }
}
/// A value that may be any float type or any integer type.
#[derive(Clone, Debug)]
pub enum EitherPrim<F, I> {
    Float(F),
    Int(I),
}
impl<F: fmt::Debug, I: fmt::Debug> EitherPrim<F, I> {
    /// Extract the float value.
    ///
    /// Panics if this is the `Int` variant.
    pub fn unwrap_float(self) -> F {
        match self {
            EitherPrim::Float(f) => f,
            EitherPrim::Int(_) => panic!("expected float; got {self:?}"),
        }
    }

    /// Extract the integer value.
    ///
    /// Panics if this is the `Float` variant.
    pub fn unwrap_int(self) -> I {
        match self {
            EitherPrim::Float(_) => panic!("expected int; got {self:?}"),
            EitherPrim::Int(i) => i,
        }
    }
}
/// Convenience 1-dimensional float domains.
impl<F: Float> Domain<F> {
    /// `x` unbounded in both directions.
    const UNBOUNDED: Self =
        Self { start: Bound::Unbounded, end: Bound::Unbounded, check_points: None };

    /// `x >= 0`
    const POSITIVE: Self =
        Self { start: Bound::Included(F::ZERO), end: Bound::Unbounded, check_points: None };

    /// `x > 0`
    const STRICTLY_POSITIVE: Self =
        Self { start: Bound::Excluded(F::ZERO), end: Bound::Unbounded, check_points: None };

    /// Wrap in the float variant of [`EitherPrim`].
    const fn into_prim_float<I>(self) -> EitherPrim<Self, Domain<I>> {
        EitherPrim::Float(self)
    }
}
/// Convenience 1-dimensional integer domains.
impl<I: Int> Domain<I> {
    /// `x` unbounded in both directions.
    const UNBOUNDED_INT: Self =
        Self { start: Bound::Unbounded, end: Bound::Unbounded, check_points: None };

    /// Wrap in the int variant of [`EitherPrim`].
    const fn into_prim_int<F>(self) -> EitherPrim<Domain<F>, Self> {
        EitherPrim::Int(self)
    }
}
/// Multidimensional domains, represented as an array of 1-D domains.
impl<F: Float, I: Int> EitherPrim<Domain<F>, Domain<I>> {
    /// `x` unbounded.
    const UNBOUNDED1: [Self; 1] =
        [Domain { start: Bound::Unbounded, end: Bound::Unbounded, check_points: None }
            .into_prim_float()];

    /// `{x1, x2}` unbounded.
    const UNBOUNDED2: [Self; 2] =
        [Domain::UNBOUNDED.into_prim_float(), Domain::UNBOUNDED.into_prim_float()];

    /// `{x1, x2, x3}` unbounded.
    const UNBOUNDED3: [Self; 3] = [
        Domain::UNBOUNDED.into_prim_float(),
        Domain::UNBOUNDED.into_prim_float(),
        Domain::UNBOUNDED.into_prim_float(),
    ];

    /// `{x1, x2}` unbounded, one float and one int.
    const UNBOUNDED_F_I: [Self; 2] =
        [Domain::UNBOUNDED.into_prim_float(), Domain::UNBOUNDED_INT.into_prim_int()];

    /// `x >= 0`
    const POSITIVE: [Self; 1] = [Domain::POSITIVE.into_prim_float()];

    /// `x > 0`
    const STRICTLY_POSITIVE: [Self; 1] = [Domain::STRICTLY_POSITIVE.into_prim_float()];

    /// Used for versions of `asin` and `acos`.
    const INVERSE_TRIG_PERIODIC: [Self; 1] = [Domain {
        start: Bound::Included(F::NEG_ONE),
        end: Bound::Included(F::ONE),
        check_points: None,
    }
    .into_prim_float()];

    /// Domain for `acosh`
    const ACOSH: [Self; 1] =
        [Domain { start: Bound::Included(F::ONE), end: Bound::Unbounded, check_points: None }
            .into_prim_float()];

    /// Domain for `atanh`
    const ATANH: [Self; 1] = [Domain {
        start: Bound::Excluded(F::NEG_ONE),
        end: Bound::Excluded(F::ONE),
        check_points: None,
    }
    .into_prim_float()];

    /// Domain for `sin`, `cos`, and `tan`
    const TRIG: [Self; 1] = [Domain {
        // Trig functions have special behavior at fractions of π.
        check_points: Some(|| Box::new([-F::PI, -F::FRAC_PI_2, F::FRAC_PI_2, F::PI].into_iter())),
        ..Domain::UNBOUNDED
    }
    .into_prim_float()];

    /// Domain for `log` in various bases
    const LOG: [Self; 1] = Self::STRICTLY_POSITIVE;

    /// Domain for `log1p` i.e. `log(1 + x)`
    const LOG1P: [Self; 1] =
        [Domain { start: Bound::Excluded(F::NEG_ONE), end: Bound::Unbounded, check_points: None }
            .into_prim_float()];

    /// Domain for `sqrt`
    const SQRT: [Self; 1] = Self::POSITIVE;

    /// Domain for `gamma`
    const GAMMA: [Self; 1] = [Domain {
        check_points: Some(|| {
            // Negative integers are asymptotes
            Box::new((0..u8::MAX).map(|scale| {
                let mut base = F::ZERO;
                for _ in 0..scale {
                    base = base - F::ONE;
                }
                base
            }))
        }),
        // Whether or not gamma is defined for negative numbers is implementation dependent
        ..Domain::UNBOUNDED
    }
    .into_prim_float()];

    /// Domain for `loggamma`
    const LGAMMA: [Self; 1] = Self::STRICTLY_POSITIVE;

    /// Domain for `jn` and `yn`.
    // FIXME: the domain should provide some sort of "reasonable range" so we don't actually test
    // the entire system unbounded.
    const BESSEL_N: [Self; 2] =
        [Domain::UNBOUNDED_INT.into_prim_int(), Domain::UNBOUNDED.into_prim_float()];
}
/// Get the domain for a given function.
///
/// Looks up the per-argument domain list for the routine identified by `id`, then
/// returns a clone of the domain for argument `argnum` (panics if `argnum` is out
/// of range for that routine, as with any slice index).
pub fn get_domain<F: Float, I: Int>(
    id: Identifier,
    argnum: usize,
) -> EitherPrim<Domain<F>, Domain<I>> {
    let domains: &[EitherPrim<Domain<F>, Domain<I>>] = match id.base_name() {
        // Routines with restricted one-argument domains.
        BaseName::Acos | BaseName::Asin => &EitherPrim::INVERSE_TRIG_PERIODIC[..],
        BaseName::Acosh => &EitherPrim::ACOSH[..],
        BaseName::Atanh => &EitherPrim::ATANH[..],
        BaseName::Log | BaseName::Log10 | BaseName::Log2 => &EitherPrim::LOG[..],
        BaseName::Log1p => &EitherPrim::LOG1P[..],
        BaseName::Sqrt => &EitherPrim::SQRT[..],
        BaseName::Lgamma | BaseName::LgammaR => &EitherPrim::LGAMMA[..],
        BaseName::Tgamma => &EitherPrim::GAMMA[..],
        // Unbounded but with extra check points at fractions of π.
        BaseName::Cos | BaseName::Sin | BaseName::Sincos | BaseName::Tan => &EitherPrim::TRIG[..],
        // Mixed float/int argument lists.
        BaseName::Jn | BaseName::Yn => &EitherPrim::BESSEL_N[..],
        BaseName::Ldexp | BaseName::Scalbn => &EitherPrim::UNBOUNDED_F_I[..],
        // Fully unbounded domains, grouped by arity.
        BaseName::Fma => &EitherPrim::UNBOUNDED3[..],
        BaseName::Atan2
        | BaseName::Copysign
        | BaseName::Fdim
        | BaseName::Fmax
        | BaseName::Fmaximum
        | BaseName::FmaximumNum
        | BaseName::Fmin
        | BaseName::Fminimum
        | BaseName::FminimumNum
        | BaseName::Fmod
        | BaseName::Hypot
        | BaseName::Nextafter
        | BaseName::Pow
        | BaseName::Remainder
        | BaseName::Remquo => &EitherPrim::UNBOUNDED2[..],
        BaseName::Asinh
        | BaseName::Atan
        | BaseName::Cbrt
        | BaseName::Ceil
        | BaseName::Cosh
        | BaseName::Erf
        | BaseName::Erfc
        | BaseName::Exp
        | BaseName::Exp10
        | BaseName::Exp2
        | BaseName::Expm1
        | BaseName::Fabs
        | BaseName::Floor
        | BaseName::Frexp
        | BaseName::Ilogb
        | BaseName::J0
        | BaseName::J1
        | BaseName::Modf
        | BaseName::Rint
        | BaseName::Round
        | BaseName::Roundeven
        | BaseName::Sinh
        | BaseName::Tanh
        | BaseName::Trunc
        | BaseName::Y0
        | BaseName::Y1 => &EitherPrim::UNBOUNDED1[..],
    };

    domains[argnum].clone()
}

View file

@ -0,0 +1,503 @@
//! An IEEE-compliant 8-bit float type for testing purposes.
use std::cmp::{self, Ordering};
use std::{fmt, ops};
use crate::Float;
/// Sometimes verifying float logic is easiest when all values can quickly be checked exhaustively
/// or by hand.
///
/// IEEE-754 compliant type that includes a 1 bit sign, 4 bit exponent, and 3 bit significand.
/// Bias is -7.
///
/// Based on <https://en.wikipedia.org/wiki/Minifloat#Example_8-bit_float_(1.4.3)>.
#[derive(Clone, Copy)]
#[repr(transparent)]
#[allow(non_camel_case_types)]
pub struct f8(u8); // bit layout: `0bS_EEEE_MMM` (sign, exponent, significand)
impl Float for f8 {
    type Int = u8;
    type SignedInt = i8;

    const ZERO: Self = Self(0b0_0000_000);
    const NEG_ZERO: Self = Self(0b1_0000_000);
    const ONE: Self = Self(0b0_0111_000);
    const NEG_ONE: Self = Self(0b1_0111_000);
    // Largest/smallest finite values: maximum finite exponent, all significand bits set.
    const MAX: Self = Self(0b0_1110_111);
    const MIN: Self = Self(0b1_1110_111);
    const INFINITY: Self = Self(0b0_1111_000);
    const NEG_INFINITY: Self = Self(0b1_1111_000);
    const NAN: Self = Self(0b0_1111_100);
    const NEG_NAN: Self = Self(0b1_1111_100);
    // Smallest normal: exponent field 1, significand 0.
    const MIN_POSITIVE_NORMAL: Self = Self(1 << Self::SIG_BITS);
    // FIXME: incorrect values
    const EPSILON: Self = Self::ZERO;
    const PI: Self = Self::ZERO;
    const NEG_PI: Self = Self::ZERO;
    const FRAC_PI_2: Self = Self::ZERO;

    const BITS: u32 = 8;
    const SIG_BITS: u32 = 3;
    const SIGN_MASK: Self::Int = 0b1_0000_000;
    const SIG_MASK: Self::Int = 0b0_0000_111;
    const EXP_MASK: Self::Int = 0b0_1111_000;
    const IMPLICIT_BIT: Self::Int = 0b0_0001_000;

    fn to_bits(self) -> Self::Int {
        self.0
    }

    fn to_bits_signed(self) -> Self::SignedInt {
        self.0 as i8
    }

    // NaN: all-ones exponent with a nonzero significand.
    fn is_nan(self) -> bool {
        self.0 & Self::EXP_MASK == Self::EXP_MASK && self.0 & Self::SIG_MASK != 0
    }

    // Infinity: all-ones exponent with a zero significand.
    fn is_infinite(self) -> bool {
        self.0 & Self::EXP_MASK == Self::EXP_MASK && self.0 & Self::SIG_MASK == 0
    }

    fn is_sign_negative(self) -> bool {
        self.0 & Self::SIGN_MASK != 0
    }

    fn from_bits(a: Self::Int) -> Self {
        Self(a)
    }

    // Reuse the generic bit-manipulation implementations from libm.
    fn abs(self) -> Self {
        libm::generic::fabs(self)
    }

    fn copysign(self, other: Self) -> Self {
        libm::generic::copysign(self, other)
    }

    // Not needed by current tests.
    fn fma(self, _y: Self, _z: Self) -> Self {
        unimplemented!()
    }

    // Not needed by current tests.
    fn normalize(_significand: Self::Int) -> (i32, Self::Int) {
        unimplemented!()
    }
}
impl f8 {
    /// Number of entries in [`Self::ALL`]: 2^8 bit patterns minus the 16 with an
    /// all-ones exponent (infinities and NaNs).
    pub const ALL_LEN: usize = 240;

    /// All non-infinite non-NaN values of `f8`, in ascending numeric order
    /// (negatives descending in magnitude, then ±0, then positives ascending).
    pub const ALL: [Self; Self::ALL_LEN] = [
        // -m*2^7
        Self(0b1_1110_111), // -240
        Self(0b1_1110_110),
        Self(0b1_1110_101),
        Self(0b1_1110_100),
        Self(0b1_1110_011),
        Self(0b1_1110_010),
        Self(0b1_1110_001),
        Self(0b1_1110_000), // -128
        // -m*2^6
        Self(0b1_1101_111), // -120
        Self(0b1_1101_110),
        Self(0b1_1101_101),
        Self(0b1_1101_100),
        Self(0b1_1101_011),
        Self(0b1_1101_010),
        Self(0b1_1101_001),
        Self(0b1_1101_000), // -64
        // -m*2^5
        Self(0b1_1100_111), // -60
        Self(0b1_1100_110),
        Self(0b1_1100_101),
        Self(0b1_1100_100),
        Self(0b1_1100_011),
        Self(0b1_1100_010),
        Self(0b1_1100_001),
        Self(0b1_1100_000), // -32
        // -m*2^4
        Self(0b1_1011_111), // -30
        Self(0b1_1011_110),
        Self(0b1_1011_101),
        Self(0b1_1011_100),
        Self(0b1_1011_011),
        Self(0b1_1011_010),
        Self(0b1_1011_001),
        Self(0b1_1011_000), // -16
        // -m*2^3
        Self(0b1_1010_111), // -15
        Self(0b1_1010_110),
        Self(0b1_1010_101),
        Self(0b1_1010_100),
        Self(0b1_1010_011),
        Self(0b1_1010_010),
        Self(0b1_1010_001),
        Self(0b1_1010_000), // -8
        // -m*2^2
        Self(0b1_1001_111), // -7.5
        Self(0b1_1001_110),
        Self(0b1_1001_101),
        Self(0b1_1001_100),
        Self(0b1_1001_011),
        Self(0b1_1001_010),
        Self(0b1_1001_001),
        Self(0b1_1001_000), // -4
        // -m*2^1
        Self(0b1_1000_111), // -3.75
        Self(0b1_1000_110),
        Self(0b1_1000_101),
        Self(0b1_1000_100),
        Self(0b1_1000_011),
        Self(0b1_1000_010),
        Self(0b1_1000_001),
        Self(0b1_1000_000), // -2
        // -m*2^0
        Self(0b1_0111_111), // -1.875
        Self(0b1_0111_110),
        Self(0b1_0111_101),
        Self(0b1_0111_100),
        Self(0b1_0111_011),
        Self(0b1_0111_010),
        Self(0b1_0111_001),
        Self(0b1_0111_000), // -1
        // -m*2^-1
        Self(0b1_0110_111), // -0.9375
        Self(0b1_0110_110),
        Self(0b1_0110_101),
        Self(0b1_0110_100),
        Self(0b1_0110_011),
        Self(0b1_0110_010),
        Self(0b1_0110_001),
        Self(0b1_0110_000), // -0.5
        // -m*2^-2
        Self(0b1_0101_111), // -0.46875
        Self(0b1_0101_110),
        Self(0b1_0101_101),
        Self(0b1_0101_100),
        Self(0b1_0101_011),
        Self(0b1_0101_010),
        Self(0b1_0101_001),
        Self(0b1_0101_000), // -0.25
        // -m*2^-3
        Self(0b1_0100_111), // -0.234375
        Self(0b1_0100_110),
        Self(0b1_0100_101),
        Self(0b1_0100_100),
        Self(0b1_0100_011),
        Self(0b1_0100_010),
        Self(0b1_0100_001),
        Self(0b1_0100_000), // -0.125
        // -m*2^-4
        Self(0b1_0011_111), // -0.1171875
        Self(0b1_0011_110),
        Self(0b1_0011_101),
        Self(0b1_0011_100),
        Self(0b1_0011_011),
        Self(0b1_0011_010),
        Self(0b1_0011_001),
        Self(0b1_0011_000), // -0.0625
        // -m*2^-5
        Self(0b1_0010_111), // -0.05859375
        Self(0b1_0010_110),
        Self(0b1_0010_101),
        Self(0b1_0010_100),
        Self(0b1_0010_011),
        Self(0b1_0010_010),
        Self(0b1_0010_001),
        Self(0b1_0010_000), // -0.03125
        // -m*2^-6
        Self(0b1_0001_111), // -0.029296875
        Self(0b1_0001_110),
        Self(0b1_0001_101),
        Self(0b1_0001_100),
        Self(0b1_0001_011),
        Self(0b1_0001_010),
        Self(0b1_0001_001),
        Self(0b1_0001_000), // -0.015625
        // -m*2^-7 subnormal numbers
        Self(0b1_0000_111), // -0.013671875
        Self(0b1_0000_110),
        Self(0b1_0000_101),
        Self(0b1_0000_100),
        Self(0b1_0000_011),
        Self(0b1_0000_010),
        Self(0b1_0000_001), // -0.001953125
        // Zeroes
        Self(0b1_0000_000), // -0.0
        Self(0b0_0000_000), // 0.0
        // m*2^-7 // subnormal numbers
        Self(0b0_0000_001),
        Self(0b0_0000_010),
        Self(0b0_0000_011),
        Self(0b0_0000_100),
        Self(0b0_0000_101),
        Self(0b0_0000_110),
        Self(0b0_0000_111), // 0.013671875
        // m*2^-6
        Self(0b0_0001_000), // 0.015625
        Self(0b0_0001_001),
        Self(0b0_0001_010),
        Self(0b0_0001_011),
        Self(0b0_0001_100),
        Self(0b0_0001_101),
        Self(0b0_0001_110),
        Self(0b0_0001_111), // 0.029296875
        // m*2^-5
        Self(0b0_0010_000), // 0.03125
        Self(0b0_0010_001),
        Self(0b0_0010_010),
        Self(0b0_0010_011),
        Self(0b0_0010_100),
        Self(0b0_0010_101),
        Self(0b0_0010_110),
        Self(0b0_0010_111), // 0.05859375
        // m*2^-4
        Self(0b0_0011_000), // 0.0625
        Self(0b0_0011_001),
        Self(0b0_0011_010),
        Self(0b0_0011_011),
        Self(0b0_0011_100),
        Self(0b0_0011_101),
        Self(0b0_0011_110),
        Self(0b0_0011_111), // 0.1171875
        // m*2^-3
        Self(0b0_0100_000), // 0.125
        Self(0b0_0100_001),
        Self(0b0_0100_010),
        Self(0b0_0100_011),
        Self(0b0_0100_100),
        Self(0b0_0100_101),
        Self(0b0_0100_110),
        Self(0b0_0100_111), // 0.234375
        // m*2^-2
        Self(0b0_0101_000), // 0.25
        Self(0b0_0101_001),
        Self(0b0_0101_010),
        Self(0b0_0101_011),
        Self(0b0_0101_100),
        Self(0b0_0101_101),
        Self(0b0_0101_110),
        Self(0b0_0101_111), // 0.46875
        // m*2^-1
        Self(0b0_0110_000), // 0.5
        Self(0b0_0110_001),
        Self(0b0_0110_010),
        Self(0b0_0110_011),
        Self(0b0_0110_100),
        Self(0b0_0110_101),
        Self(0b0_0110_110),
        Self(0b0_0110_111), // 0.9375
        // m*2^0
        Self(0b0_0111_000), // 1
        Self(0b0_0111_001),
        Self(0b0_0111_010),
        Self(0b0_0111_011),
        Self(0b0_0111_100),
        Self(0b0_0111_101),
        Self(0b0_0111_110),
        Self(0b0_0111_111), // 1.875
        // m*2^1
        Self(0b0_1000_000), // 2
        Self(0b0_1000_001),
        Self(0b0_1000_010),
        Self(0b0_1000_011),
        Self(0b0_1000_100),
        Self(0b0_1000_101),
        Self(0b0_1000_110),
        Self(0b0_1000_111), // 3.75
        // m*2^2
        Self(0b0_1001_000), // 4
        Self(0b0_1001_001),
        Self(0b0_1001_010),
        Self(0b0_1001_011),
        Self(0b0_1001_100),
        Self(0b0_1001_101),
        Self(0b0_1001_110),
        Self(0b0_1001_111), // 7.5
        // m*2^3
        Self(0b0_1010_000), // 8
        Self(0b0_1010_001),
        Self(0b0_1010_010),
        Self(0b0_1010_011),
        Self(0b0_1010_100),
        Self(0b0_1010_101),
        Self(0b0_1010_110),
        Self(0b0_1010_111), // 15
        // m*2^4
        Self(0b0_1011_000), // 16
        Self(0b0_1011_001),
        Self(0b0_1011_010),
        Self(0b0_1011_011),
        Self(0b0_1011_100),
        Self(0b0_1011_101),
        Self(0b0_1011_110),
        Self(0b0_1011_111), // 30
        // m*2^5
        Self(0b0_1100_000), // 32
        Self(0b0_1100_001),
        Self(0b0_1100_010),
        Self(0b0_1100_011),
        Self(0b0_1100_100),
        Self(0b0_1100_101),
        Self(0b0_1100_110),
        Self(0b0_1100_111), // 60
        // m*2^6
        Self(0b0_1101_000), // 64
        Self(0b0_1101_001),
        Self(0b0_1101_010),
        Self(0b0_1101_011),
        Self(0b0_1101_100),
        Self(0b0_1101_101),
        Self(0b0_1101_110),
        Self(0b0_1101_111), // 120
        // m*2^7
        Self(0b0_1110_000), // 128
        Self(0b0_1110_001),
        Self(0b0_1110_010),
        Self(0b0_1110_011),
        Self(0b0_1110_100),
        Self(0b0_1110_101),
        Self(0b0_1110_110),
        Self(0b0_1110_111), // 240
    ];
}
// Arithmetic on `f8` is never exercised by the tests; these operator impls are
// panicking stubs — presumably present only to satisfy trait bounds (e.g. on
// `Float`) — TODO confirm against the `Float` trait definition.
impl ops::Add for f8 {
    type Output = Self;
    fn add(self, _rhs: Self) -> Self::Output {
        unimplemented!()
    }
}

impl ops::Sub for f8 {
    type Output = Self;
    fn sub(self, _rhs: Self) -> Self::Output {
        unimplemented!()
    }
}

impl ops::Mul for f8 {
    type Output = Self;
    fn mul(self, _rhs: Self) -> Self::Output {
        unimplemented!()
    }
}

impl ops::Div for f8 {
    type Output = Self;
    fn div(self, _rhs: Self) -> Self::Output {
        unimplemented!()
    }
}
impl ops::Neg for f8 {
    type Output = Self;

    // Negation is a pure sign-bit flip, well-defined for every bit pattern
    // (including NaN and the infinities).
    fn neg(self) -> Self::Output {
        let flipped = self.0 ^ Self::SIGN_MASK;
        Self(flipped)
    }
}
// Like the basic arithmetic operators, `Rem` and the compound-assignment
// operators are unused by the tests and panic if called.
impl ops::Rem for f8 {
    type Output = Self;
    fn rem(self, _rhs: Self) -> Self::Output {
        unimplemented!()
    }
}

impl ops::AddAssign for f8 {
    fn add_assign(&mut self, _rhs: Self) {
        unimplemented!()
    }
}

impl ops::SubAssign for f8 {
    fn sub_assign(&mut self, _rhs: Self) {
        unimplemented!()
    }
}

impl ops::MulAssign for f8 {
    fn mul_assign(&mut self, _rhs: Self) {
        unimplemented!()
    }
}
impl cmp::PartialEq for f8 {
    // IEEE-style equality: NaN is unequal to everything (itself included),
    // `+0.0 == -0.0`, and all other values compare by exact bit pattern.
    fn eq(&self, other: &Self) -> bool {
        if self.is_nan() || other.is_nan() {
            return false;
        }
        // Both operands are (signed) zero iff the OR of their magnitudes is zero.
        let both_zero = self.abs().to_bits() | other.abs().to_bits() == 0;
        both_zero || self.0 == other.0
    }
}
impl cmp::PartialOrd for f8 {
    /// Ordering via the integer representation: NaNs are unordered, ±0 are equal,
    /// and remaining values compare as sign-magnitude integers.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Any magnitude above the infinity bit pattern is a NaN.
        let inf_rep = f8::EXP_MASK;

        let a_abs = self.abs().to_bits();
        let b_abs = other.abs().to_bits();

        // If either a or b is NaN, they are unordered.
        if a_abs > inf_rep || b_abs > inf_rep {
            return None;
        }

        // If a and b are both zeros, they are equal.
        if a_abs | b_abs == 0 {
            return Some(Ordering::Equal);
        }

        let a_srep = self.to_bits_signed();
        let b_srep = other.to_bits_signed();
        let res = a_srep.cmp(&b_srep);

        // `a & b >= 0` holds iff the sign bits are not both set, i.e. at least
        // one operand is non-negative.
        if a_srep & b_srep >= 0 {
            // If at least one of a and b is positive, we get the same result comparing
            // a and b as signed integers as we would with a floating-point compare.
            Some(res)
        } else {
            // Otherwise, both are negative, so we need to flip the sense of the
            // comparison to get the correct result.
            Some(res.reverse())
        }
    }
}
impl fmt::Display for f8 {
    // Human-readable decimal formatting is not needed by the tests.
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        unimplemented!()
    }
}

impl fmt::Debug for f8 {
    // Debug output shows the binary field breakdown (delegates to `fmt::Binary`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Binary::fmt(self, f)
    }
}
impl fmt::Binary for f8 {
    // Formats as `0bS_EEEE_MMM`, separating the sign, exponent, and significand
    // fields of the byte.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let bits = self.0;
        let sign = bits >> 7;
        let exp = (bits & Self::EXP_MASK) >> Self::SIG_BITS;
        let sig = bits & Self::SIG_MASK;
        write!(f, "0b{:b}_{:04b}_{:03b}", sign, exp, sig)
    }
}
impl fmt::LowerHex for f8 {
    // Hex output is just the inner byte's hex formatting; spelled with UFCS so
    // the intended trait is unambiguous.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::LowerHex::fmt(&self.0, f)
    }
}
/// Construct an `f8` from a hexadecimal float literal string.
///
/// The literal is parsed for an 8-bit format with a 3-bit significand; any parse
/// failure panics (at compile time when used in const context).
pub const fn hf8(s: &str) -> f8 {
    let Ok(bits) = libm::support::hex_float::parse_hex_exact(s, 8, 3) else { panic!() };
    f8(bits as u8)
}

View file

@ -0,0 +1,43 @@
//! Different generators that can create random or systematic bit patterns.
pub mod case_list;
pub mod edge_cases;
pub mod random;
pub mod spaced;
/// A wrapper to turn any iterator into an `ExactSizeIterator`. Asserts the final result to ensure
/// the provided size was correct.
#[derive(Debug)]
pub struct KnownSize<I> {
    // Number of items the caller promised the iterator yields.
    total: u64,
    // Items yielded so far.
    current: u64,
    iter: I,
}

impl<I> KnownSize<I> {
    /// Wrap `iter`, promising that it will yield exactly `total` items.
    pub fn new(iter: I, total: u64) -> Self {
        Self { total, current: 0, iter }
    }
}

impl<I: Iterator> Iterator for KnownSize<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        match self.iter.next() {
            Some(item) => {
                self.current += 1;
                Some(item)
            }
            None => {
                // Exhausted: verify the promised total against what was observed.
                assert_eq!(self.current, self.total, "total items did not match expected");
                None
            }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = usize::try_from(self.total - self.current).unwrap();
        (remaining, Some(remaining))
    }
}

impl<I: Iterator> ExactSizeIterator for KnownSize<I> {}

View file

@ -0,0 +1,853 @@
//! Test cases to verify specific values.
//!
//! Each routine can have a set of inputs and, optionally, outputs. If an output is provided, it
//! will be used to check against. If only inputs are provided, the case will be checked against
//! a basis.
//!
//! This is useful for adding regression tests or expected failures.
use libm::hf64;
#[cfg(f128_enabled)]
use libm::hf128;
use crate::{CheckBasis, CheckCtx, GeneratorKind, MathOp, op};
/// A single test case: concrete inputs plus an optional known-correct output.
pub struct TestCase<Op: MathOp> {
    /// Arguments to pass to the routine under test.
    pub input: Op::RustArgs,
    /// Expected result; `None` means "check this input against a basis instead".
    pub output: Option<Op::RustRet>,
}
impl<Op: MathOp> TestCase<Op> {
    /// Append input-only cases (no expected output; checked against a basis).
    #[expect(dead_code)]
    fn append_inputs(v: &mut Vec<Self>, l: &[Op::RustArgs]) {
        v.extend(l.iter().copied().map(|input| Self { input, output: None }));
    }

    /// Append `(input, optional expected output)` pairs.
    fn append_pairs(v: &mut Vec<Self>, l: &[(Op::RustArgs, Option<Op::RustRet>)])
    where
        Op::RustRet: Copy,
    {
        v.extend(l.iter().copied().map(|(input, output)| Self { input, output }));
    }
}
// Per-routine special-case lists. Most are empty placeholders; entries are added
// as regressions or tricky cases are discovered for a routine.
fn acos_cases() -> Vec<TestCase<op::acos::Routine>> {
    vec![]
}
fn acosf_cases() -> Vec<TestCase<op::acosf::Routine>> {
    vec![]
}
fn acosh_cases() -> Vec<TestCase<op::acosh::Routine>> {
    vec![]
}
fn acoshf_cases() -> Vec<TestCase<op::acoshf::Routine>> {
    vec![]
}
fn asin_cases() -> Vec<TestCase<op::asin::Routine>> {
    vec![]
}
fn asinf_cases() -> Vec<TestCase<op::asinf::Routine>> {
    vec![]
}
fn asinh_cases() -> Vec<TestCase<op::asinh::Routine>> {
    vec![]
}
fn asinhf_cases() -> Vec<TestCase<op::asinhf::Routine>> {
    vec![]
}
fn atan_cases() -> Vec<TestCase<op::atan::Routine>> {
    vec![]
}
fn atan2_cases() -> Vec<TestCase<op::atan2::Routine>> {
    vec![]
}
fn atan2f_cases() -> Vec<TestCase<op::atan2f::Routine>> {
    vec![]
}
fn atanf_cases() -> Vec<TestCase<op::atanf::Routine>> {
    vec![]
}
fn atanh_cases() -> Vec<TestCase<op::atanh::Routine>> {
    vec![]
}
fn atanhf_cases() -> Vec<TestCase<op::atanhf::Routine>> {
    vec![]
}
fn cbrt_cases() -> Vec<TestCase<op::cbrt::Routine>> {
    vec![]
}
fn cbrtf_cases() -> Vec<TestCase<op::cbrtf::Routine>> {
    vec![]
}
fn ceil_cases() -> Vec<TestCase<op::ceil::Routine>> {
    vec![]
}
fn ceilf_cases() -> Vec<TestCase<op::ceilf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn ceilf128_cases() -> Vec<TestCase<op::ceilf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn ceilf16_cases() -> Vec<TestCase<op::ceilf16::Routine>> {
    vec![]
}
fn copysign_cases() -> Vec<TestCase<op::copysign::Routine>> {
    vec![]
}
fn copysignf_cases() -> Vec<TestCase<op::copysignf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn copysignf128_cases() -> Vec<TestCase<op::copysignf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn copysignf16_cases() -> Vec<TestCase<op::copysignf16::Routine>> {
    vec![]
}
fn cos_cases() -> Vec<TestCase<op::cos::Routine>> {
    vec![]
}
fn cosf_cases() -> Vec<TestCase<op::cosf::Routine>> {
    vec![]
}
fn cosh_cases() -> Vec<TestCase<op::cosh::Routine>> {
    vec![]
}
fn coshf_cases() -> Vec<TestCase<op::coshf::Routine>> {
    vec![]
}
fn erf_cases() -> Vec<TestCase<op::erf::Routine>> {
    vec![]
}
fn erfc_cases() -> Vec<TestCase<op::erfc::Routine>> {
    vec![]
}
fn erfcf_cases() -> Vec<TestCase<op::erfcf::Routine>> {
    vec![]
}
fn erff_cases() -> Vec<TestCase<op::erff::Routine>> {
    vec![]
}
fn exp_cases() -> Vec<TestCase<op::exp::Routine>> {
    vec![]
}
fn exp10_cases() -> Vec<TestCase<op::exp10::Routine>> {
    vec![]
}
fn exp10f_cases() -> Vec<TestCase<op::exp10f::Routine>> {
    vec![]
}
fn exp2_cases() -> Vec<TestCase<op::exp2::Routine>> {
    vec![]
}
fn exp2f_cases() -> Vec<TestCase<op::exp2f::Routine>> {
    vec![]
}
fn expf_cases() -> Vec<TestCase<op::expf::Routine>> {
    vec![]
}
fn expm1_cases() -> Vec<TestCase<op::expm1::Routine>> {
    vec![]
}
fn expm1f_cases() -> Vec<TestCase<op::expm1f::Routine>> {
    vec![]
}
fn fabs_cases() -> Vec<TestCase<op::fabs::Routine>> {
    vec![]
}
fn fabsf_cases() -> Vec<TestCase<op::fabsf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn fabsf128_cases() -> Vec<TestCase<op::fabsf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn fabsf16_cases() -> Vec<TestCase<op::fabsf16::Routine>> {
    vec![]
}
fn fdim_cases() -> Vec<TestCase<op::fdim::Routine>> {
    vec![]
}
fn fdimf_cases() -> Vec<TestCase<op::fdimf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn fdimf128_cases() -> Vec<TestCase<op::fdimf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn fdimf16_cases() -> Vec<TestCase<op::fdimf16::Routine>> {
    vec![]
}
fn floor_cases() -> Vec<TestCase<op::floor::Routine>> {
    vec![]
}
fn floorf_cases() -> Vec<TestCase<op::floorf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn floorf128_cases() -> Vec<TestCase<op::floorf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn floorf16_cases() -> Vec<TestCase<op::floorf16::Routine>> {
    vec![]
}
fn fma_cases() -> Vec<TestCase<op::fma::Routine>> {
    let mut v = vec![];
    TestCase::append_pairs(
        &mut v,
        &[
            // Previous failure with incorrect sign
            ((5e-324, -5e-324, 0.0), Some(-0.0)),
        ],
    );
    v
}
// Case lists for `fmaf` through the `fmod` family. The `fmaf128` and `fmod*`
// lists carry regression cases; the rest are placeholders.
fn fmaf_cases() -> Vec<TestCase<op::fmaf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn fmaf128_cases() -> Vec<TestCase<op::fmaf128::Routine>> {
    let mut v = vec![];
    TestCase::append_pairs(
        &mut v,
        &[
            (
                // Tricky rounding case that previously failed in extensive tests
                (
                    hf128!("-0x1.1966cc01966cc01966cc01966f06p-25"),
                    hf128!("-0x1.669933fe69933fe69933fe6997c9p-16358"),
                    hf128!("-0x0.000000000000000000000000048ap-16382"),
                ),
                Some(hf128!("0x0.c5171470a3ff5e0f68d751491b18p-16382")),
            ),
            (
                // Subnormal edge case that caused a failure
                (
                    hf128!("0x0.7ffffffffffffffffffffffffff7p-16382"),
                    hf128!("0x1.ffffffffffffffffffffffffffffp-1"),
                    hf128!("0x0.8000000000000000000000000009p-16382"),
                ),
                Some(hf128!("0x1.0000000000000000000000000000p-16382")),
            ),
        ],
    );
    v
}
#[cfg(f16_enabled)]
fn fmaxf16_cases() -> Vec<TestCase<op::fmaxf16::Routine>> {
    vec![]
}
fn fmaxf_cases() -> Vec<TestCase<op::fmaxf::Routine>> {
    vec![]
}
fn fmax_cases() -> Vec<TestCase<op::fmax::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn fmaxf128_cases() -> Vec<TestCase<op::fmaxf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn fmaximumf16_cases() -> Vec<TestCase<op::fmaximumf16::Routine>> {
    vec![]
}
fn fmaximumf_cases() -> Vec<TestCase<op::fmaximumf::Routine>> {
    vec![]
}
fn fmaximum_cases() -> Vec<TestCase<op::fmaximum::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn fmaximumf128_cases() -> Vec<TestCase<op::fmaximumf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn fmaximum_numf16_cases() -> Vec<TestCase<op::fmaximum_numf16::Routine>> {
    vec![]
}
fn fmaximum_numf_cases() -> Vec<TestCase<op::fmaximum_numf::Routine>> {
    vec![]
}
fn fmaximum_num_cases() -> Vec<TestCase<op::fmaximum_num::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn fmaximum_numf128_cases() -> Vec<TestCase<op::fmaximum_numf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn fminf16_cases() -> Vec<TestCase<op::fminf16::Routine>> {
    vec![]
}
fn fminf_cases() -> Vec<TestCase<op::fminf::Routine>> {
    vec![]
}
fn fmin_cases() -> Vec<TestCase<op::fmin::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn fminf128_cases() -> Vec<TestCase<op::fminf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn fminimumf16_cases() -> Vec<TestCase<op::fminimumf16::Routine>> {
    vec![]
}
fn fminimumf_cases() -> Vec<TestCase<op::fminimumf::Routine>> {
    vec![]
}
fn fminimum_cases() -> Vec<TestCase<op::fminimum::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn fminimumf128_cases() -> Vec<TestCase<op::fminimumf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn fminimum_numf16_cases() -> Vec<TestCase<op::fminimum_numf16::Routine>> {
    vec![]
}
fn fminimum_numf_cases() -> Vec<TestCase<op::fminimum_numf::Routine>> {
    vec![]
}
fn fminimum_num_cases() -> Vec<TestCase<op::fminimum_num::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn fminimum_numf128_cases() -> Vec<TestCase<op::fminimum_numf128::Routine>> {
    vec![]
}
fn fmod_cases() -> Vec<TestCase<op::fmod::Routine>> {
    let mut v = vec![];
    TestCase::append_pairs(
        &mut v,
        &[
            // Previous failure with incorrect loop iteration
            // <https://github.com/rust-lang/libm/pull/469#discussion_r2022337272>
            ((2.1, 3.123e-320), Some(2.0696e-320)),
            ((2.1, 2.253547e-318), Some(1.772535e-318)),
        ],
    );
    v
}
fn fmodf_cases() -> Vec<TestCase<op::fmodf::Routine>> {
    let mut v = vec![];
    TestCase::append_pairs(
        &mut v,
        &[
            // Previous failure with incorrect loop iteration
            // <https://github.com/rust-lang/libm/pull/469#discussion_r2022337272>
            ((2.1, 8.858e-42), Some(8.085e-42)),
            ((2.1, 6.39164e-40), Some(6.1636e-40)),
            ((5.5, 6.39164e-40), Some(4.77036e-40)),
            ((-151.189, 6.39164e-40), Some(-5.64734e-40)),
        ],
    );
    v
}
#[cfg(f128_enabled)]
fn fmodf128_cases() -> Vec<TestCase<op::fmodf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn fmodf16_cases() -> Vec<TestCase<op::fmodf16::Routine>> {
    vec![]
}
// Case lists for `frexp` through `rint`. Only `rint` carries a regression case
// (platform-dependent on i586, hence the cfg split).
fn frexp_cases() -> Vec<TestCase<op::frexp::Routine>> {
    vec![]
}
fn frexpf_cases() -> Vec<TestCase<op::frexpf::Routine>> {
    vec![]
}
fn hypot_cases() -> Vec<TestCase<op::hypot::Routine>> {
    vec![]
}
fn hypotf_cases() -> Vec<TestCase<op::hypotf::Routine>> {
    vec![]
}
fn ilogb_cases() -> Vec<TestCase<op::ilogb::Routine>> {
    vec![]
}
fn ilogbf_cases() -> Vec<TestCase<op::ilogbf::Routine>> {
    vec![]
}
fn j0_cases() -> Vec<TestCase<op::j0::Routine>> {
    vec![]
}
fn j0f_cases() -> Vec<TestCase<op::j0f::Routine>> {
    vec![]
}
fn j1_cases() -> Vec<TestCase<op::j1::Routine>> {
    vec![]
}
fn j1f_cases() -> Vec<TestCase<op::j1f::Routine>> {
    vec![]
}
fn jn_cases() -> Vec<TestCase<op::jn::Routine>> {
    vec![]
}
fn jnf_cases() -> Vec<TestCase<op::jnf::Routine>> {
    vec![]
}
fn ldexp_cases() -> Vec<TestCase<op::ldexp::Routine>> {
    vec![]
}
fn ldexpf_cases() -> Vec<TestCase<op::ldexpf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn ldexpf128_cases() -> Vec<TestCase<op::ldexpf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn ldexpf16_cases() -> Vec<TestCase<op::ldexpf16::Routine>> {
    vec![]
}
fn lgamma_cases() -> Vec<TestCase<op::lgamma::Routine>> {
    vec![]
}
fn lgamma_r_cases() -> Vec<TestCase<op::lgamma_r::Routine>> {
    vec![]
}
fn lgammaf_cases() -> Vec<TestCase<op::lgammaf::Routine>> {
    vec![]
}
fn lgammaf_r_cases() -> Vec<TestCase<op::lgammaf_r::Routine>> {
    vec![]
}
fn log_cases() -> Vec<TestCase<op::log::Routine>> {
    vec![]
}
fn log10_cases() -> Vec<TestCase<op::log10::Routine>> {
    vec![]
}
fn log10f_cases() -> Vec<TestCase<op::log10f::Routine>> {
    vec![]
}
fn log1p_cases() -> Vec<TestCase<op::log1p::Routine>> {
    vec![]
}
fn log1pf_cases() -> Vec<TestCase<op::log1pf::Routine>> {
    vec![]
}
fn log2_cases() -> Vec<TestCase<op::log2::Routine>> {
    vec![]
}
fn log2f_cases() -> Vec<TestCase<op::log2f::Routine>> {
    vec![]
}
fn logf_cases() -> Vec<TestCase<op::logf::Routine>> {
    vec![]
}
fn modf_cases() -> Vec<TestCase<op::modf::Routine>> {
    vec![]
}
fn modff_cases() -> Vec<TestCase<op::modff::Routine>> {
    vec![]
}
fn nextafter_cases() -> Vec<TestCase<op::nextafter::Routine>> {
    vec![]
}
fn nextafterf_cases() -> Vec<TestCase<op::nextafterf::Routine>> {
    vec![]
}
fn pow_cases() -> Vec<TestCase<op::pow::Routine>> {
    vec![]
}
fn powf_cases() -> Vec<TestCase<op::powf::Routine>> {
    vec![]
}
fn remainder_cases() -> Vec<TestCase<op::remainder::Routine>> {
    vec![]
}
fn remainderf_cases() -> Vec<TestCase<op::remainderf::Routine>> {
    vec![]
}
fn remquo_cases() -> Vec<TestCase<op::remquo::Routine>> {
    vec![]
}
fn remquof_cases() -> Vec<TestCase<op::remquof::Routine>> {
    vec![]
}
fn rint_cases() -> Vec<TestCase<op::rint::Routine>> {
    let mut v = vec![];
    TestCase::append_pairs(
        &mut v,
        &[
            // Known failure on i586
            #[cfg(not(x86_no_sse))]
            ((hf64!("-0x1.e3f13ff995ffcp+38"),), Some(hf64!("-0x1.e3f13ff994000p+38"))),
            #[cfg(x86_no_sse)]
            ((hf64!("-0x1.e3f13ff995ffcp+38"),), Some(hf64!("-0x1.e3f13ff998000p+38"))),
        ],
    );
    v
}
// Case lists for `rintf` through `ynf`. Only `roundeven` carries a regression
// case (mirroring the i586-specific `rint` case above it in the file).
fn rintf_cases() -> Vec<TestCase<op::rintf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn rintf128_cases() -> Vec<TestCase<op::rintf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn rintf16_cases() -> Vec<TestCase<op::rintf16::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn roundf16_cases() -> Vec<TestCase<op::roundf16::Routine>> {
    vec![]
}
fn round_cases() -> Vec<TestCase<op::round::Routine>> {
    vec![]
}
fn roundf_cases() -> Vec<TestCase<op::roundf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn roundf128_cases() -> Vec<TestCase<op::roundf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn roundevenf16_cases() -> Vec<TestCase<op::roundevenf16::Routine>> {
    vec![]
}
fn roundeven_cases() -> Vec<TestCase<op::roundeven::Routine>> {
    let mut v = vec![];
    TestCase::append_pairs(
        &mut v,
        &[
            // Known failure on i586
            #[cfg(not(x86_no_sse))]
            ((hf64!("-0x1.e3f13ff995ffcp+38"),), Some(hf64!("-0x1.e3f13ff994000p+38"))),
            #[cfg(x86_no_sse)]
            ((hf64!("-0x1.e3f13ff995ffcp+38"),), Some(hf64!("-0x1.e3f13ff998000p+38"))),
        ],
    );
    v
}
fn roundevenf_cases() -> Vec<TestCase<op::roundevenf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn roundevenf128_cases() -> Vec<TestCase<op::roundevenf128::Routine>> {
    vec![]
}
fn scalbn_cases() -> Vec<TestCase<op::scalbn::Routine>> {
    vec![]
}
fn scalbnf_cases() -> Vec<TestCase<op::scalbnf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn scalbnf128_cases() -> Vec<TestCase<op::scalbnf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn scalbnf16_cases() -> Vec<TestCase<op::scalbnf16::Routine>> {
    vec![]
}
fn sin_cases() -> Vec<TestCase<op::sin::Routine>> {
    vec![]
}
fn sincos_cases() -> Vec<TestCase<op::sincos::Routine>> {
    vec![]
}
fn sincosf_cases() -> Vec<TestCase<op::sincosf::Routine>> {
    vec![]
}
fn sinf_cases() -> Vec<TestCase<op::sinf::Routine>> {
    vec![]
}
fn sinh_cases() -> Vec<TestCase<op::sinh::Routine>> {
    vec![]
}
fn sinhf_cases() -> Vec<TestCase<op::sinhf::Routine>> {
    vec![]
}
fn sqrt_cases() -> Vec<TestCase<op::sqrt::Routine>> {
    vec![]
}
fn sqrtf_cases() -> Vec<TestCase<op::sqrtf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn sqrtf128_cases() -> Vec<TestCase<op::sqrtf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn sqrtf16_cases() -> Vec<TestCase<op::sqrtf16::Routine>> {
    vec![]
}
fn tan_cases() -> Vec<TestCase<op::tan::Routine>> {
    vec![]
}
fn tanf_cases() -> Vec<TestCase<op::tanf::Routine>> {
    vec![]
}
fn tanh_cases() -> Vec<TestCase<op::tanh::Routine>> {
    vec![]
}
fn tanhf_cases() -> Vec<TestCase<op::tanhf::Routine>> {
    vec![]
}
fn tgamma_cases() -> Vec<TestCase<op::tgamma::Routine>> {
    vec![]
}
fn tgammaf_cases() -> Vec<TestCase<op::tgammaf::Routine>> {
    vec![]
}
fn trunc_cases() -> Vec<TestCase<op::trunc::Routine>> {
    vec![]
}
fn truncf_cases() -> Vec<TestCase<op::truncf::Routine>> {
    vec![]
}
#[cfg(f128_enabled)]
fn truncf128_cases() -> Vec<TestCase<op::truncf128::Routine>> {
    vec![]
}
#[cfg(f16_enabled)]
fn truncf16_cases() -> Vec<TestCase<op::truncf16::Routine>> {
    vec![]
}
fn y0_cases() -> Vec<TestCase<op::y0::Routine>> {
    vec![]
}
fn y0f_cases() -> Vec<TestCase<op::y0f::Routine>> {
    vec![]
}
fn y1_cases() -> Vec<TestCase<op::y1::Routine>> {
    vec![]
}
fn y1f_cases() -> Vec<TestCase<op::y1f::Routine>> {
    vec![]
}
fn yn_cases() -> Vec<TestCase<op::yn::Routine>> {
    vec![]
}
fn ynf_cases() -> Vec<TestCase<op::ynf::Routine>> {
    vec![]
}
/// Implemented for each routine's `Routine` type to expose its special-case list.
pub trait CaseListInput: MathOp + Sized {
    /// Returns all hand-written cases for this routine.
    fn get_cases() -> Vec<TestCase<Self>>;
}
// Generates `impl CaseListInput for op::<name>::Routine` forwarding to the
// corresponding `<name>_cases()` function defined in this module. Invoked once
// per routine by `libm_macros::for_each_function!` below.
macro_rules! impl_case_list {
    (
        fn_name: $fn_name:ident,
        attrs: [$($attr:meta),*],
    ) => {
        paste::paste! {
            $(#[$attr])*
            impl CaseListInput for crate::op::$fn_name::Routine {
                fn get_cases() -> Vec<TestCase<Self>> {
                    [< $fn_name _cases >]()
                }
            }
        }
    };
}

libm_macros::for_each_function! {
    callback: impl_case_list,
}
/// This is the test generator for standalone tests, i.e. those with no basis. For this, it
/// only extracts tests with a known output.
pub fn get_test_cases_standalone<Op>(
    ctx: &CheckCtx,
) -> impl Iterator<Item = (Op::RustArgs, Op::RustRet)> + use<'_, Op>
where
    Op: MathOp + CaseListInput,
{
    // Standalone checking is only valid with no basis and the list generator.
    assert_eq!(ctx.basis, CheckBasis::None);
    assert_eq!(ctx.gen_kind, GeneratorKind::List);

    Op::get_cases().into_iter().filter_map(|case| match case.output {
        Some(expected) => Some((case.input, expected)),
        None => None,
    })
}
/// Opposite of the above; extract only test cases that don't have a known output, to be run
/// against a basis.
pub fn get_test_cases_basis<Op>(
ctx: &CheckCtx,
) -> (impl Iterator<Item = Op::RustArgs> + use<'_, Op>, u64)
where
Op: MathOp + CaseListInput,
{
assert_ne!(ctx.basis, CheckBasis::None);
assert_eq!(ctx.gen_kind, GeneratorKind::List);
let cases = Op::get_cases();
let count: u64 = cases.iter().filter(|case| case.output.is_none()).count().try_into().unwrap();
(cases.into_iter().filter(|x| x.output.is_none()).map(|x| x.input), count)
}

View file

@ -0,0 +1,310 @@
//! A generator that checks a handful of cases near infinities, zeros, asymptotes, and NaNs.
use libm::support::{CastInto, Float, Int, MinInt};
use crate::domain::get_domain;
use crate::generate::KnownSize;
use crate::op::OpITy;
use crate::run_cfg::{check_near_count, check_point_count};
use crate::{BaseName, CheckCtx, FloatExt, FloatTy, MathOp, test_log};
/// Generate a sequence of edge cases, e.g. numbers near zeroes and infinities.
pub trait EdgeCaseInput<Op> {
    /// Returns the iterator of edge-case inputs along with its expected item count.
    fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self> + Send, u64);
}
/// Create a list of values around interesting points (infinities, zeroes, NaNs).
///
/// Returns the (deduplicated, bit-sorted) values for argument `argnum` along with their count.
fn float_edge_cases<Op>(
    ctx: &CheckCtx,
    argnum: usize,
) -> (impl Iterator<Item = Op::FTy> + Clone, u64)
where
    Op: MathOp,
{
    let mut ret = Vec::new();
    let one = OpITy::<Op>::ONE;
    let values = &mut ret;
    let domain = get_domain::<_, i8>(ctx.fn_ident, argnum).unwrap_float();
    let domain_start = domain.range_start();
    let domain_end = domain.range_end();
    let check_points = check_point_count(ctx);
    let near_points = check_near_count(ctx);
    // Check near some notable constants
    count_up(Op::FTy::ONE, near_points, values);
    count_up(Op::FTy::ZERO, near_points, values);
    count_up(Op::FTy::NEG_ONE, near_points, values);
    count_down(Op::FTy::ONE, near_points, values);
    count_down(Op::FTy::ZERO, near_points, values);
    count_down(Op::FTy::NEG_ONE, near_points, values);
    values.push(Op::FTy::NEG_ZERO);
    // Check values near the extremes and the domain boundaries. (The previous version listed
    // `count_down(domain_end, ..)` twice; since the vector is deduplicated below, walking each
    // boundary once in each direction is equivalent and avoids the redundant pass.)
    count_up(Op::FTy::NEG_INFINITY, near_points, values);
    count_down(Op::FTy::INFINITY, near_points, values);
    count_up(domain_start, near_points, values);
    count_down(domain_start, near_points, values);
    count_up(domain_end, near_points, values);
    count_down(domain_end, near_points, values);
    // Check some special values that aren't included in the above ranges
    values.push(Op::FTy::NAN);
    values.extend(Op::FTy::consts().iter());
    // Check around the maximum subnormal value
    let sub_max = Op::FTy::from_bits(Op::FTy::SIG_MASK);
    count_up(sub_max, near_points, values);
    count_down(sub_max, near_points, values);
    count_up(-sub_max, near_points, values);
    count_down(-sub_max, near_points, values);
    // Check a few values around the subnormal range
    for shift in (0..Op::FTy::SIG_BITS).step_by(Op::FTy::SIG_BITS as usize / 5) {
        let v = Op::FTy::from_bits(one << shift);
        count_up(v, 2, values);
        count_down(v, 2, values);
        count_up(-v, 2, values);
        count_down(-v, 2, values);
    }
    // Check around asymptotes
    if let Some(f) = domain.check_points {
        let iter = f();
        for x in iter.take(check_points) {
            count_up(x, near_points, values);
            count_down(x, near_points, values);
        }
    }
    // Some results may overlap so deduplicate the vector to save test cycles. Sorting and
    // deduplicating by bit pattern keeps distinct values (e.g. -0.0 vs 0.0, NaN) separate.
    values.sort_by_key(|x| x.to_bits());
    values.dedup_by_key(|x| x.to_bits());
    let count = ret.len().try_into().unwrap();
    test_log(&format!(
        "{gen_kind:?} {basis:?} {fn_ident} arg {arg}/{args}: {count} edge cases",
        gen_kind = ctx.gen_kind,
        basis = ctx.basis,
        fn_ident = ctx.fn_ident,
        arg = argnum + 1,
        args = ctx.input_count(),
    ));
    (ret.into_iter(), count)
}
/// Add up to `points` values starting at and including `x`, counting upward in the smallest
/// possible increments (1 ULP). Stops early upon reaching infinity.
fn count_up<F: Float>(mut x: F, points: u64, values: &mut Vec<F>) {
    assert!(!x.is_nan());
    for _ in 0..points {
        if !(x < F::INFINITY) {
            // Reached infinity; nothing further to record.
            break;
        }
        values.push(x);
        x = x.next_up();
    }
}
/// Add up to `points` values starting at and including `x`, counting downward in the smallest
/// possible increments (1 ULP). Stops early upon reaching negative infinity.
fn count_down<F: Float>(mut x: F, points: u64, values: &mut Vec<F>) {
    assert!(!x.is_nan());
    for _ in 0..points {
        if !(x > F::NEG_INFINITY) {
            // Reached negative infinity; nothing further to record.
            break;
        }
        values.push(x);
        x = x.next_down();
    }
}
/// Create a list of values around interesting integer points (min, zero, max).
///
/// Returns the (sorted, deduplicated) values for argument `argnum` along with their count.
pub fn int_edge_cases<I: Int>(
    ctx: &CheckCtx,
    argnum: usize,
) -> (impl Iterator<Item = I> + Clone, u64)
where
    i32: CastInto<I>,
{
    let mut values = Vec::new();
    let near_points = check_near_count(ctx);
    // Check around max/min and zero. (Zero was previously walked twice; the `dedup` below
    // discarded the duplicates, so a single walk is equivalent.)
    int_count_around(I::MIN, near_points, &mut values);
    int_count_around(I::MAX, near_points, &mut values);
    int_count_around(I::ZERO, near_points, &mut values);
    if matches!(ctx.base_name, BaseName::Scalbn | BaseName::Ldexp) {
        assert_eq!(argnum, 1, "scalbn integer argument should be arg1");
        let (emax, emin, emin_sn) = match ctx.fn_ident.math_op().float_ty {
            FloatTy::F16 => {
                #[cfg(not(f16_enabled))]
                unreachable!();
                #[cfg(f16_enabled)]
                (f16::EXP_MAX, f16::EXP_MIN, f16::EXP_MIN_SUBNORM)
            }
            FloatTy::F32 => (f32::EXP_MAX, f32::EXP_MIN, f32::EXP_MIN_SUBNORM),
            FloatTy::F64 => (f64::EXP_MAX, f64::EXP_MIN, f64::EXP_MIN_SUBNORM),
            FloatTy::F128 => {
                #[cfg(not(f128_enabled))]
                unreachable!();
                #[cfg(f128_enabled)]
                (f128::EXP_MAX, f128::EXP_MIN, f128::EXP_MIN_SUBNORM)
            }
        };
        // `scalbn`/`ldexp` have their trickiest behavior around exponent limits
        int_count_around(emax.cast(), near_points, &mut values);
        int_count_around(emin.cast(), near_points, &mut values);
        int_count_around(emin_sn.cast(), near_points, &mut values);
        int_count_around((-emin_sn).cast(), near_points, &mut values);
        // Also check values that cause the maximum possible difference in exponents
        int_count_around((emax - emin).cast(), near_points, &mut values);
        int_count_around((emin - emax).cast(), near_points, &mut values);
        int_count_around((emax - emin_sn).cast(), near_points, &mut values);
        int_count_around((emin_sn - emax).cast(), near_points, &mut values);
    }
    // Overlapping walks are expected; collapse them before counting.
    values.sort();
    values.dedup();
    let count = values.len().try_into().unwrap();
    test_log(&format!(
        "{gen_kind:?} {basis:?} {fn_ident} arg {arg}/{args}: {count} edge cases",
        gen_kind = ctx.gen_kind,
        basis = ctx.basis,
        fn_ident = ctx.fn_ident,
        arg = argnum + 1,
        args = ctx.input_count(),
    ));
    (values.into_iter(), count)
}
/// Add up to `points` values in each direction (up and down), starting at and including `x`.
/// Each direction stops early if the integer type overflows. Note that `x` itself is pushed
/// once per direction; callers deduplicate afterwards.
fn int_count_around<I: Int>(x: I, points: u64, values: &mut Vec<I>) {
    // Walk upward from `x`, stopping at overflow.
    let mut cur = x;
    for _ in 0..points {
        values.push(cur);
        let Some(next) = cur.checked_add(I::ONE) else { break };
        cur = next;
    }
    // Walk downward from `x`, stopping at underflow.
    cur = x;
    for _ in 0..points {
        values.push(cur);
        let Some(next) = cur.checked_sub(I::ONE) else { break };
        cur = next;
    }
}
/// Implement `EdgeCaseInput` for all supported argument tuples of a given float type.
macro_rules! impl_edge_case_input {
    ($fty:ty) => {
        // Single float argument: just that argument's edge cases.
        impl<Op> EdgeCaseInput<Op> for ($fty,)
        where
            Op: MathOp<RustArgs = Self, FTy = $fty>,
        {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let (iter0, steps0) = float_edge_cases::<Op>(ctx, 0);
                let iter0 = iter0.map(|v| (v,));
                (iter0, steps0)
            }
        }
        // Two float arguments: the Cartesian product of each argument's edge cases.
        impl<Op> EdgeCaseInput<Op> for ($fty, $fty)
        where
            Op: MathOp<RustArgs = Self, FTy = $fty>,
        {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let (iter0, steps0) = float_edge_cases::<Op>(ctx, 0);
                let (iter1, steps1) = float_edge_cases::<Op>(ctx, 1);
                let iter =
                    iter0.flat_map(move |first| iter1.clone().map(move |second| (first, second)));
                // `checked_mul` panics rather than silently wrapping the total case count.
                let count = steps0.checked_mul(steps1).unwrap();
                (iter, count)
            }
        }
        // Three float arguments: the three-way Cartesian product.
        impl<Op> EdgeCaseInput<Op> for ($fty, $fty, $fty)
        where
            Op: MathOp<RustArgs = Self, FTy = $fty>,
        {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let (iter0, steps0) = float_edge_cases::<Op>(ctx, 0);
                let (iter1, steps1) = float_edge_cases::<Op>(ctx, 1);
                let (iter2, steps2) = float_edge_cases::<Op>(ctx, 2);
                let iter = iter0
                    .flat_map(move |first| iter1.clone().map(move |second| (first, second)))
                    .flat_map(move |(first, second)| {
                        iter2.clone().map(move |third| (first, second, third))
                    });
                let count = steps0.checked_mul(steps1).unwrap().checked_mul(steps2).unwrap();
                (iter, count)
            }
        }
        // `(i32, float)` signatures: integer edge cases crossed with float edge cases.
        impl<Op> EdgeCaseInput<Op> for (i32, $fty)
        where
            Op: MathOp<RustArgs = Self, FTy = $fty>,
        {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let (iter0, steps0) = int_edge_cases(ctx, 0);
                let (iter1, steps1) = float_edge_cases::<Op>(ctx, 1);
                let iter =
                    iter0.flat_map(move |first| iter1.clone().map(move |second| (first, second)));
                let count = steps0.checked_mul(steps1).unwrap();
                (iter, count)
            }
        }
        // `(float, i32)` signatures: float edge cases crossed with integer edge cases.
        impl<Op> EdgeCaseInput<Op> for ($fty, i32)
        where
            Op: MathOp<RustArgs = Self, FTy = $fty>,
        {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let (iter0, steps0) = float_edge_cases::<Op>(ctx, 0);
                let (iter1, steps1) = int_edge_cases(ctx, 1);
                let iter =
                    iter0.flat_map(move |first| iter1.clone().map(move |second| (first, second)));
                let count = steps0.checked_mul(steps1).unwrap();
                (iter, count)
            }
        }
    };
}
// Instantiate `EdgeCaseInput` for each supported float width; half and quad precision are
// gated on nightly support.
#[cfg(f16_enabled)]
impl_edge_case_input!(f16);
impl_edge_case_input!(f32);
impl_edge_case_input!(f64);
#[cfg(f128_enabled)]
impl_edge_case_input!(f128);
/// Create a test case iterator of edge-case inputs. Also returns the total case count.
pub fn get_test_cases<Op>(
    ctx: &CheckCtx,
) -> (impl Iterator<Item = Op::RustArgs> + Send + use<'_, Op>, u64)
where
    Op: MathOp,
    Op::RustArgs: EdgeCaseInput<Op>,
{
    let (iter, count) = Op::RustArgs::get_cases(ctx);
    // Wrap in `KnownSize` so we get an assertion if the count is wrong.
    (KnownSize::new(iter, count), count)
}

View file

@ -0,0 +1,125 @@
use std::env;
use std::ops::RangeInclusive;
use std::sync::LazyLock;
use libm::support::Float;
use rand::distr::{Alphanumeric, StandardUniform};
use rand::prelude::Distribution;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use super::KnownSize;
use crate::CheckCtx;
use crate::run_cfg::{int_range, iteration_count};
// Env var that can pin the RNG seed so a failing run can be reproduced.
pub(crate) const SEED_ENV: &str = "LIBM_SEED";
/// The 32-byte seed used for all random generation: taken from `LIBM_SEED` if set, otherwise
/// freshly generated as 32 alphanumeric characters.
pub static SEED: LazyLock<[u8; 32]> = LazyLock::new(|| {
    let s = env::var(SEED_ENV).unwrap_or_else(|_| {
        // No seed provided; create a printable 32-character one.
        let mut rng = rand::rng();
        (0..32).map(|_| rng.sample(Alphanumeric) as char).collect()
    });
    // `LIBM_SEED` must be exactly 32 bytes to fill the seed array.
    s.as_bytes().try_into().unwrap_or_else(|_| {
        panic!("Seed must be 32 characters, got `{s}`");
    })
});
/// Generate a sequence of random values of this type.
pub trait RandomInput: Sized {
    /// Return the iterator of random inputs plus the exact number of items it will yield.
    fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self> + Send, u64);
}
/// Generate a sequence of `count` deterministically random floats, seeded from [`SEED`].
fn random_floats<F: Float>(count: u64) -> impl Iterator<Item = F>
where
    StandardUniform: Distribution<F::Int>,
{
    let mut rng = ChaCha8Rng::from_seed(*SEED);
    // Generate integers to get a full range of bitpatterns (including NaNs), then convert back
    // to the float type.
    (0..count).map(move |_| F::from_bits(rng.random::<F::Int>()))
}
/// Generate a sequence of `count` deterministically random `i32`s within `range`, seeded from
/// [`SEED`].
fn random_ints(count: u64, range: RangeInclusive<i32>) -> impl Iterator<Item = i32> {
    let mut rng = ChaCha8Rng::from_seed(*SEED);
    (0..count).map(move |_| rng.random_range::<i32, _>(range.clone()))
}
/// Implement `RandomInput` for all supported argument tuples of a given float type.
macro_rules! impl_random_input {
    ($fty:ty) => {
        impl RandomInput for ($fty,) {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let count = iteration_count(ctx, 0);
                let iter = random_floats(count).map(|f: $fty| (f,));
                (iter, count)
            }
        }
        impl RandomInput for ($fty, $fty) {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let count0 = iteration_count(ctx, 0);
                let count1 = iteration_count(ctx, 1);
                // Cartesian product of two random streams, `count0 * count1` items total.
                let iter = random_floats(count0)
                    .flat_map(move |f1: $fty| random_floats(count1).map(move |f2: $fty| (f1, f2)));
                (iter, count0 * count1)
            }
        }
        impl RandomInput for ($fty, $fty, $fty) {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let count0 = iteration_count(ctx, 0);
                let count1 = iteration_count(ctx, 1);
                let count2 = iteration_count(ctx, 2);
                let iter = random_floats(count0).flat_map(move |f1: $fty| {
                    random_floats(count1).flat_map(move |f2: $fty| {
                        random_floats(count2).map(move |f3: $fty| (f1, f2, f3))
                    })
                });
                (iter, count0 * count1 * count2)
            }
        }
        impl RandomInput for (i32, $fty) {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let count0 = iteration_count(ctx, 0);
                let count1 = iteration_count(ctx, 1);
                // The integer argument is drawn from a per-function range, not all of `i32`.
                let range0 = int_range(ctx, 0);
                let iter = random_ints(count0, range0)
                    .flat_map(move |f1: i32| random_floats(count1).map(move |f2: $fty| (f1, f2)));
                (iter, count0 * count1)
            }
        }
        impl RandomInput for ($fty, i32) {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let count0 = iteration_count(ctx, 0);
                let count1 = iteration_count(ctx, 1);
                let range1 = int_range(ctx, 1);
                let iter = random_floats(count0).flat_map(move |f1: $fty| {
                    random_ints(count1, range1.clone()).map(move |f2: i32| (f1, f2))
                });
                (iter, count0 * count1)
            }
        }
    };
}
// Instantiate `RandomInput` for each supported float width; half and quad precision are gated
// on nightly support.
#[cfg(f16_enabled)]
impl_random_input!(f16);
impl_random_input!(f32);
impl_random_input!(f64);
#[cfg(f128_enabled)]
impl_random_input!(f128);
/// Create a test case iterator of random inputs. Also returns the total case count.
pub fn get_test_cases<RustArgs: RandomInput>(
    ctx: &CheckCtx,
) -> (impl Iterator<Item = RustArgs> + Send + use<'_, RustArgs>, u64) {
    let (iter, count) = RustArgs::get_cases(ctx);
    // Wrap in `KnownSize` so we get an assertion if the count is wrong.
    (KnownSize::new(iter, count), count)
}

View file

@ -0,0 +1,253 @@
use std::fmt;
use std::ops::RangeInclusive;
use libm::support::{Float, MinInt};
use crate::domain::get_domain;
use crate::op::OpITy;
use crate::run_cfg::{int_range, iteration_count};
use crate::{CheckCtx, MathOp, linear_ints, logspace};
/// Generate a sequence of inputs that either cover the domain in completeness (for smaller float
/// types and single argument functions) or provide evenly spaced inputs across the domain with
/// approximately `u32::MAX` total iterations.
pub trait SpacedInput<Op> {
    /// Return the iterator of inputs plus the exact number of items it will yield.
    fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self> + Send, u64);
}
/// Construct an iterator from `logspace` and also calculate the total number of steps expected
/// for that iterator.
fn logspace_steps<Op>(
    ctx: &CheckCtx,
    argnum: usize,
    max_steps: u64,
) -> (impl Iterator<Item = Op::FTy> + Clone, u64)
where
    Op: MathOp,
    OpITy<Op>: TryFrom<u64, Error: fmt::Debug>,
    u64: TryFrom<OpITy<Op>, Error: fmt::Debug>,
    RangeInclusive<OpITy<Op>>: Iterator,
{
    // i8 is a dummy type here, it can be any integer.
    let domain = get_domain::<Op::FTy, i8>(ctx.fn_ident, argnum).unwrap_float();
    let start = domain.range_start();
    let end = domain.range_end();
    // Saturate at the integer type's max if `max_steps` doesn't fit in it.
    let max_steps = OpITy::<Op>::try_from(max_steps).unwrap_or(OpITy::<Op>::MAX);
    let (iter, steps) = logspace(start, end, max_steps);
    // `steps` will be <= the original `max_steps`, which is a `u64`.
    (iter, steps.try_into().unwrap())
}
/// A sum type over two iterator implementations (variants `A` and `B`), so a function can
/// return one `impl Iterator` even when different match arms build different concrete iterators.
enum EitherIter<A, B> {
    A(A),
    B(B),
}
// Both variants must yield the same item type; calls are simply delegated to whichever
// iterator is held.
impl<T, A: Iterator<Item = T>, B: Iterator<Item = T>> Iterator for EitherIter<A, B> {
    type Item = T;
    fn next(&mut self) -> Option<Self::Item> {
        match self {
            Self::A(iter) => iter.next(),
            Self::B(iter) => iter.next(),
        }
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Forward the inner hint so wrappers like `KnownSize` can validate counts.
        match self {
            Self::A(iter) => iter.size_hint(),
            Self::B(iter) => iter.size_hint(),
        }
    }
}
/// Total number of distinct bit patterns of `F`, or `None` when that number does not fit in a
/// `u64` (i.e. for floats whose bit width is 64 or more).
fn value_count<F: Float>() -> Option<u64>
where
    u64: TryFrom<F::Int>,
{
    let max: u64 = u64::try_from(F::Int::MAX).ok()?;
    // Pattern count is `MAX + 1`; guard the increment against overflow at exactly 2^64.
    max.checked_add(1)
}
/// Iterate every possible value of type `F`, produced by reinterpreting each bit pattern in
/// order from `MIN` to `MAX`.
fn all_values<F: Float>() -> impl Iterator<Item = F>
where
    RangeInclusive<F::Int>: Iterator<Item = F::Int>,
{
    let every_bit_pattern = F::Int::MIN..=F::Int::MAX;
    every_bit_pattern.map(F::from_bits)
}
/// Implement `SpacedInput` for all supported argument tuples of a given float type. Each impl
/// picks between exhaustive iteration (when the type's full value count fits the iteration
/// budget) and logarithmically spaced sampling.
macro_rules! impl_spaced_input {
    ($fty:ty) => {
        impl<Op> SpacedInput<Op> for ($fty,)
        where
            Op: MathOp<RustArgs = Self, FTy = $fty>,
        {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let max_steps0 = iteration_count(ctx, 0);
                // `f16` and `f32` can have exhaustive tests.
                match value_count::<Op::FTy>() {
                    Some(steps0) if steps0 <= max_steps0 => {
                        let iter0 = all_values();
                        let iter0 = iter0.map(|v| (v,));
                        (EitherIter::A(iter0), steps0)
                    }
                    _ => {
                        let (iter0, steps0) = logspace_steps::<Op>(ctx, 0, max_steps0);
                        let iter0 = iter0.map(|v| (v,));
                        (EitherIter::B(iter0), steps0)
                    }
                }
            }
        }
        impl<Op> SpacedInput<Op> for ($fty, $fty)
        where
            Op: MathOp<RustArgs = Self, FTy = $fty>,
        {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let max_steps0 = iteration_count(ctx, 0);
                let max_steps1 = iteration_count(ctx, 1);
                // `f16` can have exhaustive tests.
                match value_count::<Op::FTy>() {
                    Some(count) if count <= max_steps0 && count <= max_steps1 => {
                        let iter = all_values()
                            .flat_map(|first| all_values().map(move |second| (first, second)));
                        (EitherIter::A(iter), count.checked_mul(count).unwrap())
                    }
                    _ => {
                        let (iter0, steps0) = logspace_steps::<Op>(ctx, 0, max_steps0);
                        let (iter1, steps1) = logspace_steps::<Op>(ctx, 1, max_steps1);
                        let iter = iter0.flat_map(move |first| {
                            iter1.clone().map(move |second| (first, second))
                        });
                        let count = steps0.checked_mul(steps1).unwrap();
                        (EitherIter::B(iter), count)
                    }
                }
            }
        }
        impl<Op> SpacedInput<Op> for ($fty, $fty, $fty)
        where
            Op: MathOp<RustArgs = Self, FTy = $fty>,
        {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let max_steps0 = iteration_count(ctx, 0);
                let max_steps1 = iteration_count(ctx, 1);
                let max_steps2 = iteration_count(ctx, 2);
                // `f16` can be exhaustive tested if `LIBM_EXTENSIVE_TESTS` is increased.
                match value_count::<Op::FTy>() {
                    Some(count)
                        if count <= max_steps0 && count <= max_steps1 && count <= max_steps2 =>
                    {
                        let iter = all_values().flat_map(|first| {
                            all_values().flat_map(move |second| {
                                all_values().map(move |third| (first, second, third))
                            })
                        });
                        (EitherIter::A(iter), count.checked_pow(3).unwrap())
                    }
                    _ => {
                        let (iter0, steps0) = logspace_steps::<Op>(ctx, 0, max_steps0);
                        let (iter1, steps1) = logspace_steps::<Op>(ctx, 1, max_steps1);
                        let (iter2, steps2) = logspace_steps::<Op>(ctx, 2, max_steps2);
                        let iter = iter0
                            .flat_map(move |first| iter1.clone().map(move |second| (first, second)))
                            .flat_map(move |(first, second)| {
                                iter2.clone().map(move |third| (first, second, third))
                            });
                        let count =
                            steps0.checked_mul(steps1).unwrap().checked_mul(steps2).unwrap();
                        (EitherIter::B(iter), count)
                    }
                }
            }
        }
        impl<Op> SpacedInput<Op> for (i32, $fty)
        where
            Op: MathOp<RustArgs = Self, FTy = $fty>,
        {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let range0 = int_range(ctx, 0);
                let max_steps0 = iteration_count(ctx, 0);
                let max_steps1 = iteration_count(ctx, 1);
                // Only the float argument can be exhaustive; the int side is always linear.
                match value_count::<Op::FTy>() {
                    Some(count1) if count1 <= max_steps1 => {
                        let (iter0, steps0) = linear_ints(range0, max_steps0);
                        let iter = iter0
                            .flat_map(move |first| all_values().map(move |second| (first, second)));
                        (EitherIter::A(iter), steps0.checked_mul(count1).unwrap())
                    }
                    _ => {
                        let (iter0, steps0) = linear_ints(range0, max_steps0);
                        let (iter1, steps1) = logspace_steps::<Op>(ctx, 1, max_steps1);
                        let iter = iter0.flat_map(move |first| {
                            iter1.clone().map(move |second| (first, second))
                        });
                        let count = steps0.checked_mul(steps1).unwrap();
                        (EitherIter::B(iter), count)
                    }
                }
            }
        }
        impl<Op> SpacedInput<Op> for ($fty, i32)
        where
            Op: MathOp<RustArgs = Self, FTy = $fty>,
        {
            fn get_cases(ctx: &CheckCtx) -> (impl Iterator<Item = Self>, u64) {
                let max_steps0 = iteration_count(ctx, 0);
                let range1 = int_range(ctx, 1);
                let max_steps1 = iteration_count(ctx, 1);
                match value_count::<Op::FTy>() {
                    Some(count0) if count0 <= max_steps0 => {
                        let (iter1, steps1) = linear_ints(range1, max_steps1);
                        let iter = all_values().flat_map(move |first| {
                            iter1.clone().map(move |second| (first, second))
                        });
                        (EitherIter::A(iter), count0.checked_mul(steps1).unwrap())
                    }
                    _ => {
                        let (iter0, steps0) = logspace_steps::<Op>(ctx, 0, max_steps0);
                        let (iter1, steps1) = linear_ints(range1, max_steps1);
                        let iter = iter0.flat_map(move |first| {
                            iter1.clone().map(move |second| (first, second))
                        });
                        let count = steps0.checked_mul(steps1).unwrap();
                        (EitherIter::B(iter), count)
                    }
                }
            }
        }
    };
}
// Instantiate `SpacedInput` for each supported float width; half and quad precision are gated
// on nightly support.
#[cfg(f16_enabled)]
impl_spaced_input!(f16);
impl_spaced_input!(f32);
impl_spaced_input!(f64);
#[cfg(f128_enabled)]
impl_spaced_input!(f128);
/// Create a test case iterator for extensive inputs. Also returns the total test case count.
pub fn get_test_cases<Op>(
    ctx: &CheckCtx,
) -> (impl Iterator<Item = Op::RustArgs> + Send + use<'_, Op>, u64)
where
    Op: MathOp,
    Op::RustArgs: SpacedInput<Op>,
{
    // NOTE(review): unlike the edge-case and random generators, this one does not wrap the
    // iterator in `KnownSize` — confirm whether that is intentional.
    Op::RustArgs::get_cases(ctx)
}

View file

@ -0,0 +1,105 @@
#![cfg_attr(f16_enabled, feature(f16))]
#![cfg_attr(f128_enabled, feature(f128))]
#![allow(clippy::unusual_byte_groupings)] // sometimes we group by sign_exp_sig
pub mod domain;
mod f8_impl;
pub mod generate;
#[cfg(feature = "build-mpfr")]
pub mod mpfloat;
mod num;
pub mod op;
mod precision;
mod run_cfg;
mod test_traits;
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use std::sync::LazyLock;
use std::time::SystemTime;
pub use f8_impl::{f8, hf8};
pub use libm::support::{Float, Int, IntTy, MinInt};
pub use num::{FloatExt, linear_ints, logspace};
pub use op::{
BaseName, FloatTy, Identifier, MathOp, OpCFn, OpCRet, OpFTy, OpRustArgs, OpRustFn, OpRustRet,
Ty,
};
pub use precision::{MaybeOverride, SpecialCase, default_ulp};
use run_cfg::extensive_max_iterations;
pub use run_cfg::{
CheckBasis, CheckCtx, EXTENSIVE_ENV, GeneratorKind, bigint_fuzz_iteration_count,
skip_extensive_test,
};
pub use test_traits::{CheckOutput, Hex, TupleCall};
/// Result type for tests; the error side is usually from `anyhow`. Most of the time there is
/// no success value to propagate, so `T` defaults to `()`.
pub type TestResult<T = (), E = anyhow::Error> = Result<T, E>;
/// True if the `EMULATED` env var was set and nonempty at compile time. Used to determine how
/// many iterations to run.
pub const fn emulated() -> bool {
    // `option_env!` captures the variable at build time, so this can be `const`.
    match option_env!("EMULATED") {
        Some(s) => !s.is_empty(),
        None => false,
    }
}
/// True if the `CI` env var was set and nonempty at compile time.
pub const fn ci() -> bool {
    // `option_env!` captures the variable at build time, so this can be `const`.
    match option_env!("CI") {
        Some(s) => !s.is_empty(),
        None => false,
    }
}
/// Print to stderr and additionally log it to `target/test-log.txt`. This is useful for saving
/// output that would otherwise be consumed by the test harness.
pub fn test_log(s: &str) {
    // Handle to a file opened in append mode, unless a suitable path can't be determined.
    static OUTFILE: LazyLock<Option<File>> = LazyLock::new(|| {
        // If the target directory is overridden, use that environment variable. Otherwise, save
        // at the default path `{workspace_root}/target`.
        let target_dir = match env::var("CARGO_TARGET_DIR") {
            Ok(s) => PathBuf::from(s),
            Err(_) => {
                let Ok(x) = env::var("CARGO_MANIFEST_DIR") else {
                    return None;
                };
                // Two `parent()` hops from the crate manifest dir reach the workspace root.
                PathBuf::from(x).parent().unwrap().parent().unwrap().join("target")
            }
        };
        let outfile = target_dir.join("test-log.txt");
        let mut f = File::options()
            .create(true)
            .append(true)
            .open(outfile)
            .expect("failed to open logfile");
        let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap();
        // Write a one-time header describing the build/run environment.
        writeln!(f, "\n\nTest run at {}", now.as_secs()).unwrap();
        writeln!(f, "arch: {}", env::consts::ARCH).unwrap();
        writeln!(f, "os: {}", env::consts::OS).unwrap();
        writeln!(f, "bits: {}", usize::BITS).unwrap();
        writeln!(f, "emulated: {}", emulated()).unwrap();
        writeln!(f, "ci: {}", ci()).unwrap();
        writeln!(f, "cargo features: {}", env!("CFG_CARGO_FEATURES")).unwrap();
        writeln!(f, "opt level: {}", env!("CFG_OPT_LEVEL")).unwrap();
        writeln!(f, "target features: {}", env!("CFG_TARGET_FEATURES")).unwrap();
        writeln!(f, "extensive iterations {}", extensive_max_iterations()).unwrap();
        Some(f)
    });
    eprintln!("{s}");
    // `&File` implements `Write`, so a shared reference to the cached handle suffices.
    if let Some(mut f) = OUTFILE.as_ref() {
        writeln!(f, "{s}").unwrap();
    }
}

View file

@ -0,0 +1,603 @@
//! Interfaces needed to support testing with multi-precision floating point numbers.
//!
//! Within this module, the macros create a submodule for each `libm` function. These contain
//! a struct named `Operation` that implements [`MpOp`].
use std::cmp::Ordering;
use rug::Assign;
pub use rug::Float as MpFloat;
use rug::az::{self, Az};
use rug::float::Round::Nearest;
use rug::ops::{PowAssignRound, RemAssignRound};
use crate::{Float, MathOp};
/// Create a multiple-precision float with the correct number of bits for a concrete float type.
fn new_mpfloat<F: Float>() -> MpFloat {
    // Precision is the significand width plus one (the implicit leading bit).
    MpFloat::new(F::SIG_BITS + 1)
}
/// Set subnormal emulation and convert to a concrete float type.
///
/// `ord` is the rounding-direction ordering reported by the preceding MPFR operation.
fn prep_retval<F: Float>(mp: &mut MpFloat, ord: Ordering) -> F
where
    for<'a> &'a MpFloat: az::Cast<F>,
{
    // Re-round as an IEEE subnormal would before narrowing to the concrete type.
    mp.subnormalize_ieee_round(ord, Nearest);
    (&*mp).az::<F>()
}
/// Structures that represent a float operation backed by multiple-precision arithmetic.
pub trait MpOp: MathOp {
    /// The struct itself should hold any context that can be reused among calls to `run` (allocated
    /// `MpFloat`s).
    type MpTy;
    /// Create a new instance.
    fn new_mp() -> Self::MpTy;
    /// Perform the operation.
    ///
    /// Usually this means assigning inputs to cached floats, performing the operation, applying
    /// subnormal approximation, and converting the result back to concrete values.
    fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet;
}
/// Implement `MpOp` for functions with a single return value.
macro_rules! impl_mp_op {
    // Matcher for unary functions
    (
        fn_name: $fn_name:ident,
        RustFn: fn($_fty:ty,) -> $_ret:ty,
        attrs: [$($attr:meta),*],
        fn_extra: $fn_name_normalized:expr,
    ) => {
        paste::paste! {
            $(#[$attr])*
            impl MpOp for crate::op::$fn_name::Routine {
                type MpTy = MpFloat;
                fn new_mp() -> Self::MpTy {
                    new_mpfloat::<Self::FTy>()
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.assign(input.0);
                    // Invoke e.g. `sin_round` on the cached multi-precision float.
                    let ord = this.[< $fn_name_normalized _round >](Nearest);
                    prep_retval::<Self::RustRet>(this, ord)
                }
            }
        }
    };
    // Matcher for binary functions
    (
        fn_name: $fn_name:ident,
        RustFn: fn($_fty:ty, $_fty2:ty,) -> $_ret:ty,
        attrs: [$($attr:meta),*],
        fn_extra: $fn_name_normalized:expr,
    ) => {
        paste::paste! {
            $(#[$attr])*
            impl MpOp for crate::op::$fn_name::Routine {
                type MpTy = (MpFloat, MpFloat);
                fn new_mp() -> Self::MpTy {
                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.0.assign(input.0);
                    this.1.assign(input.1);
                    let ord = this.0.[< $fn_name_normalized _round >](&this.1, Nearest);
                    prep_retval::<Self::RustRet>(&mut this.0, ord)
                }
            }
        }
    };
    // Matcher for ternary functions
    (
        fn_name: $fn_name:ident,
        RustFn: fn($_fty:ty, $_fty2:ty, $_fty3:ty,) -> $_ret:ty,
        attrs: [$($attr:meta),*],
        fn_extra: $fn_name_normalized:expr,
    ) => {
        paste::paste! {
            $(#[$attr])*
            impl MpOp for crate::op::$fn_name::Routine {
                type MpTy = (MpFloat, MpFloat, MpFloat);
                fn new_mp() -> Self::MpTy {
                    (
                        new_mpfloat::<Self::FTy>(),
                        new_mpfloat::<Self::FTy>(),
                        new_mpfloat::<Self::FTy>(),
                    )
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.0.assign(input.0);
                    this.1.assign(input.1);
                    this.2.assign(input.2);
                    let ord = this.0.[< $fn_name_normalized _round >](&this.1, &this.2, Nearest);
                    prep_retval::<Self::RustRet>(&mut this.0, ord)
                }
            }
        }
    };
}
// Implement `MpOp` via the generic matchers for every routine not listed in `skip`.
// Formatting note: the skip list between the `verify-sorted` markers is kept to exactly one
// identifier per line (several entries had been merged onto shared lines, breaking that
// convention); the token stream — and thus the macro expansion — is unchanged.
libm_macros::for_each_function! {
    callback: impl_mp_op,
    emit_types: [RustFn],
    skip: [
        // Most of these need a manual implementation
        // verify-sorted-start
        ceil,
        ceilf,
        ceilf128,
        ceilf16,
        copysign,
        copysignf,
        copysignf128,
        copysignf16,
        fabs,
        fabsf,
        fabsf128,
        fabsf16,
        floor,
        floorf,
        floorf128,
        floorf16,
        fmaximum,
        fmaximumf,
        fmaximumf128,
        fmaximumf16,
        fminimum,
        fminimumf,
        fminimumf128,
        fminimumf16,
        fmod,
        fmodf,
        fmodf128,
        fmodf16,
        frexp,
        frexpf,
        ilogb,
        ilogbf,
        jn,
        jnf,
        ldexp,
        ldexpf,
        ldexpf128,
        ldexpf16,
        lgamma_r,
        lgammaf_r,
        modf,
        modff,
        nextafter,
        nextafterf,
        pow,
        powf,
        remquo,
        remquof,
        rint,
        rintf,
        rintf128,
        rintf16,
        round,
        roundeven,
        roundevenf,
        roundevenf128,
        roundevenf16,
        roundf,
        roundf128,
        roundf16,
        scalbn,
        scalbnf,
        scalbnf128,
        scalbnf16,
        sincos,
        sincosf,
        trunc,
        truncf,
        truncf128,
        truncf16,
        yn,
        ynf,
        // verify-sorted-end
    ],
    fn_extra: match MACRO_FN_NAME {
        // Remap function names that are different between mpfr and libm
        expm1 | expm1f => exp_m1,
        fabs | fabsf => abs,
        fdim | fdimf | fdimf16 | fdimf128 => positive_diff,
        fma | fmaf | fmaf128 => mul_add,
        fmax | fmaxf | fmaxf16 | fmaxf128 |
        fmaximum_num | fmaximum_numf | fmaximum_numf16 | fmaximum_numf128 => max,
        fmin | fminf | fminf16 | fminf128 |
        fminimum_num | fminimum_numf | fminimum_numf16 | fminimum_numf128 => min,
        lgamma | lgammaf => ln_gamma,
        log | logf => ln,
        log1p | log1pf => ln_1p,
        tgamma | tgammaf => gamma,
        _ => MACRO_FN_NAME_NORMALIZED
    }
}
/// Implement unary functions that don't have a `_round` version
macro_rules! impl_no_round {
    // Unary matcher
    ($($fn_name:ident => $rug_name:ident;)*) => {
        paste::paste! {
            $( impl_no_round!{ @inner_unary $fn_name, $rug_name } )*
        }
    };
    (@inner_unary $fn_name:ident, $rug_name:ident) => {
        impl MpOp for crate::op::$fn_name::Routine {
            type MpTy = MpFloat;
            fn new_mp() -> Self::MpTy {
                new_mpfloat::<Self::FTy>()
            }
            fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                this.assign(input.0);
                this.$rug_name();
                // No rounding direction is reported by these operations, so pass `Equal`.
                prep_retval::<Self::RustRet>(this, Ordering::Equal)
            }
        }
    };
}
// Map each libm routine to the rug in-place method that computes it without a `_round` variant.
impl_no_round! {
    ceil => ceil_mut;
    ceilf => ceil_mut;
    fabs => abs_mut;
    fabsf => abs_mut;
    floor => floor_mut;
    floorf => floor_mut;
    rint => round_even_mut; // FIXME: respect rounding mode
    rintf => round_even_mut; // FIXME: respect rounding mode
    round => round_mut;
    roundeven => round_even_mut;
    roundevenf => round_even_mut;
    roundf => round_mut;
    trunc => trunc_mut;
    truncf => trunc_mut;
}
// Same mappings for the nightly-gated half and quad precision types.
#[cfg(f16_enabled)]
impl_no_round! {
    ceilf16 => ceil_mut;
    fabsf16 => abs_mut;
    floorf16 => floor_mut;
    rintf16 => round_even_mut; // FIXME: respect rounding mode
    roundf16 => round_mut;
    roundevenf16 => round_even_mut;
    truncf16 => trunc_mut;
}
#[cfg(f128_enabled)]
impl_no_round! {
    ceilf128 => ceil_mut;
    fabsf128 => abs_mut;
    floorf128 => floor_mut;
    rintf128 => round_even_mut; // FIXME: respect rounding mode
    roundf128 => round_mut;
    roundevenf128 => round_even_mut;
    truncf128 => trunc_mut;
}
/// Some functions are difficult to do in a generic way. Implement them here.
macro_rules! impl_op_for_ty {
    ($fty:ty, $suffix:literal) => {
        paste::paste! {
            impl MpOp for crate::op::[<modf $suffix>]::Routine {
                type MpTy = (MpFloat, MpFloat);
                fn new_mp() -> Self::MpTy {
                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.0.assign(input.0);
                    this.1.assign(&this.0);
                    let (ord0, ord1) = this.0.trunc_fract_round(&mut this.1, Nearest);
                    // NOTE(review): returns `(this.1, this.0)` — presumably (fractional,
                    // integral) per `modf`'s contract; confirm against the rug docs.
                    (
                        prep_retval::<Self::FTy>(&mut this.1, ord0),
                        prep_retval::<Self::FTy>(&mut this.0, ord1),
                    )
                }
            }
            impl MpOp for crate::op::[<pow $suffix>]::Routine {
                type MpTy = (MpFloat, MpFloat);
                fn new_mp() -> Self::MpTy {
                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.0.assign(input.0);
                    this.1.assign(input.1);
                    let ord = this.0.pow_assign_round(&this.1, Nearest);
                    prep_retval::<Self::RustRet>(&mut this.0, ord)
                }
            }
            impl MpOp for crate::op::[<frexp $suffix>]::Routine {
                type MpTy = MpFloat;
                fn new_mp() -> Self::MpTy {
                    new_mpfloat::<Self::FTy>()
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.assign(input.0);
                    let exp = this.frexp_mut();
                    (prep_retval::<Self::FTy>(this, Ordering::Equal), exp)
                }
            }
            impl MpOp for crate::op::[<ilogb $suffix>]::Routine {
                type MpTy = MpFloat;
                fn new_mp() -> Self::MpTy {
                    new_mpfloat::<Self::FTy>()
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.assign(input.0);
                    // `get_exp` follows `frexp` for `0.5 <= |m| < 1.0`. Adjust the exponent by
                    // one to scale the significand to `1.0 <= |m| < 2.0`.
                    this.get_exp().map(|v| v - 1).unwrap_or_else(|| {
                        if this.is_infinite() {
                            i32::MAX
                        } else {
                            // Zero or NaN
                            i32::MIN
                        }
                    })
                }
            }
            impl MpOp for crate::op::[<jn $suffix>]::Routine {
                type MpTy = MpFloat;
                fn new_mp() -> Self::MpTy {
                    new_mpfloat::<Self::FTy>()
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    // The first tuple element is the integer order `n`.
                    let (n, x) = input;
                    this.assign(x);
                    let ord = this.jn_round(n, Nearest);
                    prep_retval::<Self::FTy>(this, ord)
                }
            }
            impl MpOp for crate::op::[<sincos $suffix>]::Routine {
                type MpTy = (MpFloat, MpFloat);
                fn new_mp() -> Self::MpTy {
                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.0.assign(input.0);
                    // Clear the second slot; `sin_cos_round` writes the cosine into it.
                    this.1.assign(0.0);
                    let (sord, cord) = this.0.sin_cos_round(&mut this.1, Nearest);
                    (
                        prep_retval::<Self::FTy>(&mut this.0, sord),
                        prep_retval::<Self::FTy>(&mut this.1, cord)
                    )
                }
            }
            impl MpOp for crate::op::[<remquo $suffix>]::Routine {
                type MpTy = (MpFloat, MpFloat);
                fn new_mp() -> Self::MpTy {
                    (
                        new_mpfloat::<Self::FTy>(),
                        new_mpfloat::<Self::FTy>(),
                    )
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.0.assign(input.0);
                    this.1.assign(input.1);
                    let (ord, q) = this.0.remainder_quo31_round(&this.1, Nearest);
                    (prep_retval::<Self::FTy>(&mut this.0, ord), q)
                }
            }
            impl MpOp for crate::op::[<yn $suffix>]::Routine {
                type MpTy = MpFloat;
                fn new_mp() -> Self::MpTy {
                    new_mpfloat::<Self::FTy>()
                }
                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    // The first tuple element is the integer order `n`.
                    let (n, x) = input;
                    this.assign(x);
                    let ord = this.yn_round(n, Nearest);
                    prep_retval::<Self::FTy>(this, ord)
                }
            }
        }
    };
}
/// Version of `impl_op_for_ty` with only functions that have `f16` and `f128` implementations.
///
/// Takes the float type and the routine-name suffix for that type (e.g. `"f"` for `f32`,
/// `""` for `f64`).
macro_rules! impl_op_for_ty_all {
    ($fty:ty, $suffix:literal) => {
        paste::paste! {
            impl MpOp for crate::op::[<copysign $suffix>]::Routine {
                type MpTy = (MpFloat, MpFloat);

                fn new_mp() -> Self::MpTy {
                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
                }

                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.0.assign(input.0);
                    this.1.assign(input.1);
                    this.0.copysign_mut(&this.1);
                    // `copysign` is exact, so no rounding ordering to propagate.
                    prep_retval::<Self::RustRet>(&mut this.0, Ordering::Equal)
                }
            }

            impl MpOp for crate::op::[<fmod $suffix>]::Routine {
                type MpTy = (MpFloat, MpFloat);

                fn new_mp() -> Self::MpTy {
                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
                }

                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.0.assign(input.0);
                    this.1.assign(input.1);
                    let ord = this.0.rem_assign_round(&this.1, Nearest);
                    prep_retval::<Self::RustRet>(&mut this.0, ord)
                }
            }

            impl MpOp for crate::op::[< fmaximum $suffix >]::Routine {
                type MpTy = (MpFloat, MpFloat);

                fn new_mp() -> Self::MpTy {
                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
                }

                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.0.assign(input.0);
                    this.1.assign(input.1);
                    // `fmaximum` propagates NaN (unlike `fmax`), so handle it before
                    // delegating to MPFR's max.
                    let ord = if this.0.is_nan() || this.1.is_nan() {
                        this.0.assign($fty::NAN);
                        Ordering::Equal
                    } else {
                        this.0.max_round(&this.1, Nearest)
                    };
                    prep_retval::<Self::RustRet>(&mut this.0, ord)
                }
            }

            impl MpOp for crate::op::[< fminimum $suffix >]::Routine {
                type MpTy = (MpFloat, MpFloat);

                fn new_mp() -> Self::MpTy {
                    (new_mpfloat::<Self::FTy>(), new_mpfloat::<Self::FTy>())
                }

                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.0.assign(input.0);
                    this.1.assign(input.1);
                    // `fminimum` propagates NaN (unlike `fmin`), mirroring `fmaximum` above.
                    let ord = if this.0.is_nan() || this.1.is_nan() {
                        this.0.assign($fty::NAN);
                        Ordering::Equal
                    } else {
                        this.0.min_round(&this.1, Nearest)
                    };
                    prep_retval::<Self::RustRet>(&mut this.0, ord)
                }
            }

            // `ldexp` and `scalbn` are the same for binary floating point, so just forward all
            // methods.
            impl MpOp for crate::op::[<ldexp $suffix>]::Routine {
                type MpTy = <crate::op::[<scalbn $suffix>]::Routine as MpOp>::MpTy;

                fn new_mp() -> Self::MpTy {
                    <crate::op::[<scalbn $suffix>]::Routine as MpOp>::new_mp()
                }

                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    <crate::op::[<scalbn $suffix>]::Routine as MpOp>::run(this, input)
                }
            }

            impl MpOp for crate::op::[<scalbn $suffix>]::Routine {
                type MpTy = MpFloat;

                fn new_mp() -> Self::MpTy {
                    new_mpfloat::<Self::FTy>()
                }

                fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
                    this.assign(input.0);
                    // Shifting an MPFR float left by `n` multiplies it by `2^n`, which is
                    // exactly `scalbn`.
                    *this <<= input.1;
                    prep_retval::<Self::FTy>(this, Ordering::Equal)
                }
            }
        }
    };
}
// Instantiate the routines that only exist for `f32`/`f64`.
impl_op_for_ty!(f32, "f");
impl_op_for_ty!(f64, "");

// Instantiate the routines available for all float widths; the `f16`/`f128`
// instantiations are gated on the corresponding cfg flags.
#[cfg(f16_enabled)]
impl_op_for_ty_all!(f16, "f16");
impl_op_for_ty_all!(f32, "f");
impl_op_for_ty_all!(f64, "");
#[cfg(f128_enabled)]
impl_op_for_ty_all!(f128, "f128");
// `lgamma_r` is not a simple suffix so we can't use the above macro.
impl MpOp for crate::op::lgamma_r::Routine {
    type MpTy = MpFloat;

    fn new_mp() -> Self::MpTy {
        new_mpfloat::<Self::FTy>()
    }

    fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
        this.assign(input.0);
        // `ln_abs_gamma_round` yields ln|Γ(x)| in place and the sign of Γ(x) separately,
        // matching `lgamma_r`'s `(value, sign)` return convention.
        let (sign, ord) = this.ln_abs_gamma_round(Nearest);
        let ret = prep_retval::<Self::FTy>(this, ord);
        (ret, sign as i32)
    }
}
// Same as `lgamma_r` above but for the `f32` routine.
impl MpOp for crate::op::lgammaf_r::Routine {
    type MpTy = MpFloat;

    fn new_mp() -> Self::MpTy {
        new_mpfloat::<Self::FTy>()
    }

    fn run(this: &mut Self::MpTy, input: Self::RustArgs) -> Self::RustRet {
        this.assign(input.0);
        // ln|Γ(x)| plus the sign of Γ(x), per the `lgammaf_r` convention.
        let (sign, ord) = this.ln_abs_gamma_round(Nearest);
        let ret = prep_retval::<Self::FTy>(this, ord);
        (ret, sign as i32)
    }
}
/* stub implementations so we don't need to special case them */

// Always panics if invoked; exists only so `MpOp` dispatch compiles uniformly for
// every routine.
impl MpOp for crate::op::nextafter::Routine {
    type MpTy = MpFloat;

    fn new_mp() -> Self::MpTy {
        unimplemented!("nextafter does not yet have a MPFR operation");
    }

    fn run(_this: &mut Self::MpTy, _input: Self::RustArgs) -> Self::RustRet {
        unimplemented!("nextafter does not yet have a MPFR operation");
    }
}
// Same stub as above for the `f32` routine.
impl MpOp for crate::op::nextafterf::Routine {
    type MpTy = MpFloat;

    fn new_mp() -> Self::MpTy {
        unimplemented!("nextafter does not yet have a MPFR operation");
    }

    fn run(_this: &mut Self::MpTy, _input: Self::RustArgs) -> Self::RustRet {
        unimplemented!("nextafter does not yet have a MPFR operation");
    }
}

View file

@ -0,0 +1,529 @@
//! Helpful numeric operations.
use std::cmp::min;
use std::ops::RangeInclusive;
use libm::support::Float;
use crate::{Int, MinInt};
/// Extension to `libm`'s `Float` trait with methods that are useful for tests but not
/// needed in `libm` itself.
///
/// All stepping methods treat `-0.0` and `+0.0` as a single value: stepping across zero
/// only counts it once (see `ulp_between`'s documentation and the tests below).
pub trait FloatExt: Float {
    /// The minimum subnormal number.
    const TINY_BITS: Self::Int = Self::Int::ONE;

    /// Retrieve additional constants for this float type.
    fn consts() -> Consts<Self> {
        Consts::new()
    }

    /// Increment by one ULP, saturating at infinity.
    ///
    /// NaN and `+inf` are returned unchanged.
    fn next_up(self) -> Self {
        let bits = self.to_bits();
        if self.is_nan() || bits == Self::INFINITY.to_bits() {
            return self;
        }

        let abs = self.abs().to_bits();
        let next_bits = if abs == Self::Int::ZERO {
            // Next up from 0 is the smallest subnormal
            Self::TINY_BITS
        } else if bits == abs {
            // Positive: counting up is more positive
            bits + Self::Int::ONE
        } else {
            // Negative: counting down is more positive
            bits - Self::Int::ONE
        };
        Self::from_bits(next_bits)
    }

    /// A faster way to effectively call `next_up` `n` times.
    fn n_up(self, n: Self::Int) -> Self {
        let bits = self.to_bits();
        if self.is_nan() || bits == Self::INFINITY.to_bits() || n == Self::Int::ZERO {
            return self;
        }

        let abs = self.abs().to_bits();
        let is_positive = bits == abs;
        // Stepping up from a negative value by more than `abs(self)` ULPs lands on the
        // positive side; zero is counted only once in that traversal.
        let crosses_zero = !is_positive && n > abs;
        let inf_bits = Self::INFINITY.to_bits();

        let next_bits = if abs == Self::Int::ZERO {
            // From zero, `n` steps is just the `n`th positive value, capped at infinity.
            min(n, inf_bits)
        } else if crosses_zero {
            // `abs` steps were spent reaching zero; the remainder continues upward
            // on the positive side.
            min(n - abs, inf_bits)
        } else if is_positive {
            // Positive, counting up is more positive but this may overflow
            match bits.checked_add(n) {
                Some(v) if v >= inf_bits => inf_bits,
                Some(v) => v,
                None => inf_bits,
            }
        } else {
            // Negative, counting down is more positive
            bits - n
        };
        Self::from_bits(next_bits)
    }

    /// Decrement by one ULP, saturating at negative infinity.
    ///
    /// NaN and `-inf` are returned unchanged.
    fn next_down(self) -> Self {
        let bits = self.to_bits();
        if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
            return self;
        }

        let abs = self.abs().to_bits();
        let next_bits = if abs == Self::Int::ZERO {
            // Next up from 0 is the smallest negative subnormal
            Self::TINY_BITS | Self::SIGN_MASK
        } else if bits == abs {
            // Positive: counting down is more negative
            bits - Self::Int::ONE
        } else {
            // Negative: counting up is more negative
            bits + Self::Int::ONE
        };
        Self::from_bits(next_bits)
    }

    /// A faster way to effectively call `next_down` `n` times.
    fn n_down(self, n: Self::Int) -> Self {
        let bits = self.to_bits();
        if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() || n == Self::Int::ZERO {
            return self;
        }

        let abs = self.abs().to_bits();
        let is_positive = bits == abs;
        // Mirror image of `n_up`: stepping down from a positive value by more than
        // `abs(self)` ULPs lands on the negative side.
        let crosses_zero = is_positive && n > abs;
        let inf_bits = Self::INFINITY.to_bits();
        let ninf_bits = Self::NEG_INFINITY.to_bits();

        let next_bits = if abs == Self::Int::ZERO {
            // From zero, `n` steps down is the `n`th negative value, capped at -inf.
            min(n, inf_bits) | Self::SIGN_MASK
        } else if crosses_zero {
            min(n - abs, inf_bits) | Self::SIGN_MASK
        } else if is_positive {
            // Positive, counting down is more negative
            bits - n
        } else {
            // Negative, counting up is more negative but this may overflow
            match bits.checked_add(n) {
                Some(v) if v > ninf_bits => ninf_bits,
                Some(v) => v,
                None => ninf_bits,
            }
        };
        Self::from_bits(next_bits)
    }
}

// Every `Float` gets the extension methods via their default bodies.
impl<F> FloatExt for F where F: Float {}
/// Extra constants that are useful for tests.
#[derive(Debug, Clone, Copy)]
pub struct Consts<F> {
    /// The default quiet NaN, which is also the minimum quiet NaN.
    pub pos_nan: F,
    /// The default quiet NaN with negative sign.
    pub neg_nan: F,
    /// NaN with maximum (unsigned) significand to be a quiet NaN. The significand is saturated.
    pub max_qnan: F,
    /// NaN with minimum (unsigned) significand to be a signaling NaN.
    pub min_snan: F,
    /// NaN with maximum (unsigned) significand to be a signaling NaN.
    pub max_snan: F,
    /// `max_qnan` with negative sign.
    pub neg_max_qnan: F,
    /// `min_snan` with negative sign.
    pub neg_min_snan: F,
    /// `max_snan` with negative sign.
    pub neg_max_snan: F,
}
impl<F: FloatExt> Consts<F> {
    /// Derive each constant's bit pattern from the type's masks.
    fn new() -> Self {
        // Top bit of the significand: set for the quiet NaNs built here, clear for the
        // signaling ones.
        let top_sigbit_mask = F::Int::ONE << (F::SIG_BITS - 1);
        let pos_nan = F::EXP_MASK | top_sigbit_mask;
        let max_qnan = F::EXP_MASK | F::SIG_MASK;
        let min_snan = F::EXP_MASK | F::Int::ONE;
        let max_snan = (F::EXP_MASK | F::SIG_MASK) ^ top_sigbit_mask;

        // Negative variants just add the sign bit.
        let neg_nan = pos_nan | F::SIGN_MASK;
        let neg_max_qnan = max_qnan | F::SIGN_MASK;
        let neg_min_snan = min_snan | F::SIGN_MASK;
        let neg_max_snan = max_snan | F::SIGN_MASK;

        Self {
            pos_nan: F::from_bits(pos_nan),
            neg_nan: F::from_bits(neg_nan),
            max_qnan: F::from_bits(max_qnan),
            min_snan: F::from_bits(min_snan),
            max_snan: F::from_bits(max_snan),
            neg_max_qnan: F::from_bits(neg_max_qnan),
            neg_min_snan: F::from_bits(neg_min_snan),
            neg_max_snan: F::from_bits(neg_max_snan),
        }
    }

    /// Iterate over all constants in declaration order.
    pub fn iter(self) -> impl Iterator<Item = F> {
        // Destructure so we get unused warnings if we forget a list entry.
        let Self {
            pos_nan,
            neg_nan,
            max_qnan,
            min_snan,
            max_snan,
            neg_max_qnan,
            neg_min_snan,
            neg_max_snan,
        } = self;

        [pos_nan, neg_nan, max_qnan, min_snan, max_snan, neg_max_qnan, neg_min_snan, neg_max_snan]
            .into_iter()
    }
}
/// Return the number of steps between two floats, returning `None` if either input is NaN.
///
/// This is the number of steps needed for `n_up` or `n_down` to go between values. Infinities
/// are treated the same as those functions (will return the nearest finite value), and only one
/// of `-0` or `+0` is counted. It does not matter which value is greater.
pub fn ulp_between<F: Float>(x: F, y: F) -> Option<F::Int> {
    // Map both values onto the signed step line, then take the unsigned distance.
    match (as_ulp_steps(x), as_ulp_steps(y)) {
        (Some(a), Some(b)) => Some(a.abs_diff(b)),
        _ => None,
    }
}
/// Return the (signed) number of steps from zero to `x`.
///
/// Positive values map to positive counts and negative values to negative counts,
/// so distances computed from these counts match `n_up`/`n_down` step semantics.
fn as_ulp_steps<F: Float>(x: F) -> Option<F::SignedInt> {
    let s = x.to_bits_signed();
    let val = if s >= F::SignedInt::ZERO {
        // each increment from `s = 0` is one step up from `x = 0.0`
        s
    } else {
        // each increment from `s = F::SignedInt::MIN` is one step down from `x = -0.0`
        // (e.g. `-0.0` maps to 0, `-TINY` maps to -1).
        F::SignedInt::MIN - s
    };

    // If `x` is NaN, return `None`
    (!x.is_nan()).then_some(val)
}
/// An iterator that returns floats with linearly spaced integer representations, which translates
/// to logarithmic spacing of their values.
///
/// Note that this tends to skip negative zero, so that needs to be checked explicitly.
///
/// Returns `(iterator, iterator_length)`.
///
/// # Panics
///
/// Panics if `start` or `end` is NaN, if `end < start`, or if `steps < 2` (a range needs at
/// least its two endpoints).
pub fn logspace<F: FloatExt>(
    start: F,
    end: F,
    steps: F::Int,
) -> (impl Iterator<Item = F> + Clone, F::Int)
where
    RangeInclusive<F::Int>: Iterator,
{
    assert!(!start.is_nan());
    assert!(!end.is_nan());
    assert!(end >= start);

    // A plain `checked_sub` only rejects `steps == 0`; `steps == 1` would previously fall
    // through to an integer divide-by-zero below. Validate up front so the intended message
    // is produced for both invalid values.
    assert!(steps > F::Int::ONE, "`steps` must be at least 2");
    // `steps` now counts intervals rather than points.
    let steps = steps - F::Int::ONE;
    let between = ulp_between(start, end).expect("`start` or `end` is NaN");
    // ULPs advanced per yielded item, at least one.
    let spacing = (between / steps).max(F::Int::ONE);
    let steps = steps.min(between); // At maximum, one step per ULP

    let mut x = start;
    (
        (F::Int::ZERO..=steps).map(move |_| {
            let ret = x;
            x = x.n_up(spacing);
            ret
        }),
        steps + F::Int::ONE,
    )
}
/// Returns an iterator of up to `steps` integers evenly distributed over `range`,
/// along with the exact number of items yielded.
///
/// # Panics
///
/// Panics if `steps < 2` (at least the two endpoints are required).
pub fn linear_ints(
    range: RangeInclusive<i32>,
    steps: u64,
) -> (impl Iterator<Item = i32> + Clone, u64) {
    // `checked_sub(1).unwrap()` only rejected `steps == 0` (with an unhelpful message), and
    // `steps == 1` hit an integer divide-by-zero below. Validate explicitly instead.
    assert!(steps >= 2, "`steps` must be at least 2");
    // `steps` now counts intervals rather than points.
    let steps = steps - 1;
    let between = u64::from(range.start().abs_diff(*range.end()));
    // Distance advanced per yielded item, at least one.
    let spacing = i32::try_from((between / steps).max(1)).unwrap();
    let steps = steps.min(between);
    let mut x: i32 = *range.start();
    (
        (0..=steps).map(move |_| {
            let res = x;
            // Wrapping add to avoid panic on last item (where `x` could overflow past i32::MAX as
            // there is no next item).
            x = x.wrapping_add(spacing);
            res
        }),
        steps + 1,
    )
}
#[cfg(test)]
mod tests {
    use std::cmp::max;

    use super::*;
    use crate::f8;

    // `next_up`/`next_down` must agree with the ordered list of all finite `f8` values.
    #[test]
    fn test_next_up_down() {
        for (i, v) in f8::ALL.into_iter().enumerate() {
            let down = v.next_down().to_bits();
            let up = v.next_up().to_bits();

            if i == 0 {
                assert_eq!(down, f8::NEG_INFINITY.to_bits(), "{i} next_down({v:#010b})");
            } else {
                let expected =
                    if v == f8::ZERO { 1 | f8::SIGN_MASK } else { f8::ALL[i - 1].to_bits() };
                assert_eq!(down, expected, "{i} next_down({v:#010b})");
            }

            if i == f8::ALL_LEN - 1 {
                assert_eq!(up, f8::INFINITY.to_bits(), "{i} next_up({v:#010b})");
            } else {
                let expected = if v == f8::NEG_ZERO { 1 } else { f8::ALL[i + 1].to_bits() };
                assert_eq!(up, expected, "{i} next_up({v:#010b})");
            }
        }
    }

    // Infinities saturate and NaN passes through unchanged.
    #[test]
    fn test_next_up_down_inf_nan() {
        assert_eq!(f8::NEG_INFINITY.next_up().to_bits(), f8::ALL[0].to_bits(),);
        assert_eq!(f8::NEG_INFINITY.next_down().to_bits(), f8::NEG_INFINITY.to_bits(),);
        assert_eq!(f8::INFINITY.next_down().to_bits(), f8::ALL[f8::ALL_LEN - 1].to_bits(),);
        assert_eq!(f8::INFINITY.next_up().to_bits(), f8::INFINITY.to_bits(),);
        assert_eq!(f8::NAN.next_up().to_bits(), f8::NAN.to_bits(),);
        assert_eq!(f8::NAN.next_down().to_bits(), f8::NAN.to_bits(),);
    }

    #[test]
    fn test_n_up_down_quick() {
        assert_eq!(f8::ALL[0].n_up(4).to_bits(), f8::ALL[4].to_bits(),);
        assert_eq!(
            f8::ALL[f8::ALL_LEN - 1].n_down(4).to_bits(),
            f8::ALL[f8::ALL_LEN - 5].to_bits(),
        );

        // Check around zero
        assert_eq!(f8::from_bits(0b0).n_up(7).to_bits(), 0b0_0000_111);
        assert_eq!(f8::from_bits(0b0).n_down(7).to_bits(), 0b1_0000_111);

        // Check across zero
        assert_eq!(f8::from_bits(0b1_0000_111).n_up(8).to_bits(), 0b0_0000_001);
        assert_eq!(f8::from_bits(0b0_0000_111).n_down(8).to_bits(), 0b1_0000_001);
    }

    #[test]
    fn test_n_up_down_one() {
        // Verify that `n_up(1)` and `n_down(1)` are the same as `next_up()` and `next_down()`.
        for i in 0..u8::MAX {
            let v = f8::from_bits(i);
            assert_eq!(v.next_up().to_bits(), v.n_up(1).to_bits());
            assert_eq!(v.next_down().to_bits(), v.n_down(1).to_bits());
        }
    }

    #[test]
    fn test_n_up_down_inf_nan_zero() {
        assert_eq!(f8::NEG_INFINITY.n_up(1).to_bits(), f8::ALL[0].to_bits());
        assert_eq!(f8::NEG_INFINITY.n_up(239).to_bits(), f8::ALL[f8::ALL_LEN - 1].to_bits());
        assert_eq!(f8::NEG_INFINITY.n_up(240).to_bits(), f8::INFINITY.to_bits());
        assert_eq!(f8::NEG_INFINITY.n_down(u8::MAX).to_bits(), f8::NEG_INFINITY.to_bits());

        assert_eq!(f8::INFINITY.n_down(1).to_bits(), f8::ALL[f8::ALL_LEN - 1].to_bits());
        assert_eq!(f8::INFINITY.n_down(239).to_bits(), f8::ALL[0].to_bits());
        assert_eq!(f8::INFINITY.n_down(240).to_bits(), f8::NEG_INFINITY.to_bits());
        assert_eq!(f8::INFINITY.n_up(u8::MAX).to_bits(), f8::INFINITY.to_bits());

        assert_eq!(f8::NAN.n_up(u8::MAX).to_bits(), f8::NAN.to_bits());
        assert_eq!(f8::NAN.n_down(u8::MAX).to_bits(), f8::NAN.to_bits());

        assert_eq!(f8::ZERO.n_down(1).to_bits(), f8::TINY_BITS | f8::SIGN_MASK);
        assert_eq!(f8::NEG_ZERO.n_up(1).to_bits(), f8::TINY_BITS);
    }

    /// True if the specified range of `f8::ALL` includes both +0 and -0
    fn crossed_zero(start: usize, end: usize) -> bool {
        let crossed = &f8::ALL[start..=end];
        crossed.iter().any(|f| f8::eq_repr(*f, f8::ZERO))
            && crossed.iter().any(|f| f8::eq_repr(*f, f8::NEG_ZERO))
    }

    // Exhaustive check of `n_up`/`n_down` against index arithmetic on `f8::ALL`,
    // accounting for zero being counted only once.
    #[test]
    fn test_n_up_down() {
        for (i, v) in f8::ALL.into_iter().enumerate() {
            for n in 0..f8::ALL_LEN {
                let down = v.n_down(n as u8).to_bits();
                let up = v.n_up(n as u8).to_bits();

                if let Some(down_exp_idx) = i.checked_sub(n) {
                    // No overflow
                    let mut expected = f8::ALL[down_exp_idx].to_bits();
                    if n >= 1 && crossed_zero(down_exp_idx, i) {
                        // If both -0 and +0 are included, we need to adjust our expected value
                        match down_exp_idx.checked_sub(1) {
                            Some(v) => expected = f8::ALL[v].to_bits(),
                            // Saturate to -inf if we are out of values
                            None => expected = f8::NEG_INFINITY.to_bits(),
                        }
                    }

                    assert_eq!(down, expected, "{i} {n} n_down({v:#010b})");
                } else {
                    // Overflow to -inf
                    assert_eq!(down, f8::NEG_INFINITY.to_bits(), "{i} {n} n_down({v:#010b})");
                }

                let mut up_exp_idx = i + n;
                if up_exp_idx < f8::ALL_LEN {
                    // No overflow
                    if n >= 1 && up_exp_idx < f8::ALL_LEN && crossed_zero(i, up_exp_idx) {
                        // If both -0 and +0 are included, we need to adjust our expected value
                        up_exp_idx += 1;
                    }

                    let expected = if up_exp_idx >= f8::ALL_LEN {
                        f8::INFINITY.to_bits()
                    } else {
                        f8::ALL[up_exp_idx].to_bits()
                    };

                    assert_eq!(up, expected, "{i} {n} n_up({v:#010b})");
                } else {
                    // Overflow to +inf
                    assert_eq!(up, f8::INFINITY.to_bits(), "{i} {n} n_up({v:#010b})");
                }
            }
        }
    }

    // `ulp_between` must agree with index distance in `f8::ALL` (minus one when the
    // span contains both zeros) and must round-trip through `n_up`/`n_down`.
    #[test]
    fn test_ulp_between() {
        for (i, x) in f8::ALL.into_iter().enumerate() {
            for (j, y) in f8::ALL.into_iter().enumerate() {
                let ulp = ulp_between(x, y).unwrap();
                let make_msg = || format!("i: {i} j: {j} x: {x:b} y: {y:b} ulp {ulp}");

                let i_low = min(i, j);
                let i_hi = max(i, j);
                let mut expected = u8::try_from(i_hi - i_low).unwrap();
                if crossed_zero(i_low, i_hi) {
                    expected -= 1;
                }

                assert_eq!(ulp, expected, "{}", make_msg());

                // Skip if either are zero since `next_{up,down}` will count over it
                let either_zero = x == f8::ZERO || y == f8::ZERO;
                if x < y && !either_zero {
                    assert_eq!(x.n_up(ulp).to_bits(), y.to_bits(), "{}", make_msg());
                    assert_eq!(y.n_down(ulp).to_bits(), x.to_bits(), "{}", make_msg());
                } else if !either_zero {
                    assert_eq!(y.n_up(ulp).to_bits(), x.to_bits(), "{}", make_msg());
                    assert_eq!(x.n_down(ulp).to_bits(), y.to_bits(), "{}", make_msg());
                }
            }
        }
    }

    #[test]
    fn test_ulp_between_inf_nan_zero() {
        assert_eq!(ulp_between(f8::NEG_INFINITY, f8::INFINITY).unwrap(), f8::ALL_LEN as u8);
        assert_eq!(ulp_between(f8::INFINITY, f8::NEG_INFINITY).unwrap(), f8::ALL_LEN as u8);
        assert_eq!(
            ulp_between(f8::NEG_INFINITY, f8::ALL[f8::ALL_LEN - 1]).unwrap(),
            f8::ALL_LEN as u8 - 1
        );
        assert_eq!(ulp_between(f8::INFINITY, f8::ALL[0]).unwrap(), f8::ALL_LEN as u8 - 1);

        assert_eq!(ulp_between(f8::ZERO, f8::NEG_ZERO).unwrap(), 0);
        assert_eq!(ulp_between(f8::NAN, f8::ZERO), None);
        assert_eq!(ulp_between(f8::ZERO, f8::NAN), None);
    }

    #[test]
    fn test_logspace() {
        let (ls, count) = logspace(f8::from_bits(0x0), f8::from_bits(0x4), 2);
        let ls: Vec<_> = ls.collect();
        let exp = [f8::from_bits(0x0), f8::from_bits(0x4)];
        assert_eq!(ls, exp);
        assert_eq!(ls.len(), usize::from(count));

        let (ls, count) = logspace(f8::from_bits(0x0), f8::from_bits(0x4), 3);
        let ls: Vec<_> = ls.collect();
        let exp = [f8::from_bits(0x0), f8::from_bits(0x2), f8::from_bits(0x4)];
        assert_eq!(ls, exp);
        assert_eq!(ls.len(), usize::from(count));

        // Check that we include all values with no repeats if `steps` exceeds the maximum number
        // of steps.
        let (ls, count) = logspace(f8::from_bits(0x0), f8::from_bits(0x3), 10);
        let ls: Vec<_> = ls.collect();
        let exp = [f8::from_bits(0x0), f8::from_bits(0x1), f8::from_bits(0x2), f8::from_bits(0x3)];
        assert_eq!(ls, exp);
        assert_eq!(ls.len(), usize::from(count));
    }

    #[test]
    fn test_linear_ints() {
        let (ints, count) = linear_ints(0..=4, 2);
        let ints: Vec<_> = ints.collect();
        let exp = [0, 4];
        assert_eq!(ints, exp);
        assert_eq!(ints.len(), usize::try_from(count).unwrap());

        let (ints, count) = linear_ints(0..=4, 3);
        let ints: Vec<_> = ints.collect();
        let exp = [0, 2, 4];
        assert_eq!(ints, exp);
        assert_eq!(ints.len(), usize::try_from(count).unwrap());

        // Check that we include all values with no repeats if `steps` exceeds the maximum number
        // of steps.
        let (ints, count) = linear_ints(0x0..=0x3, 10);
        let ints: Vec<_> = ints.collect();
        let exp = [0, 1, 2, 3];
        assert_eq!(ints, exp);
        assert_eq!(ints.len(), usize::try_from(count).unwrap());

        // Check that there are no panics around `i32::MAX`.
        let (ints, count) = linear_ints(i32::MAX - 1..=i32::MAX, 5);
        let ints: Vec<_> = ints.collect();
        let exp = [i32::MAX - 1, i32::MAX];
        assert_eq!(ints, exp);
        assert_eq!(ints.len(), usize::try_from(count).unwrap());
    }

    // Pin the exact bit patterns of every NaN constant for the 8-bit test float.
    #[test]
    fn test_consts() {
        let Consts {
            pos_nan,
            neg_nan,
            max_qnan,
            min_snan,
            max_snan,
            neg_max_qnan,
            neg_min_snan,
            neg_max_snan,
        } = f8::consts();

        assert_eq!(pos_nan.to_bits(), 0b0_1111_100);
        assert_eq!(neg_nan.to_bits(), 0b1_1111_100);
        assert_eq!(max_qnan.to_bits(), 0b0_1111_111);
        assert_eq!(min_snan.to_bits(), 0b0_1111_001);
        assert_eq!(max_snan.to_bits(), 0b0_1111_011);
        assert_eq!(neg_max_qnan.to_bits(), 0b1_1111_111);
        assert_eq!(neg_min_snan.to_bits(), 0b1_1111_001);
        assert_eq!(neg_max_snan.to_bits(), 0b1_1111_011);
    }
}

View file

@ -0,0 +1,151 @@
//! Types representing individual functions.
//!
//! Each routine gets a module with its name, e.g. `mod sinf { /* ... */ }`. The module
//! contains a unit struct `Routine` which implements `MathOp`.
//!
//! Basically everything could be called a "function" here, so we loosely use the following
//! terminology:
//!
//! - "Function": the math operation that does not have an associated precision. E.g. `f(x) = e^x`,
//! `f(x) = log(x)`.
//! - "Routine": A code implementation of a math operation with a specific precision. E.g. `exp`,
//! `expf`, `expl`, `log`, `logf`.
//! - "Operation" / "Op": Something that relates a routine to a function or is otherwise higher
//! level. `Op` is also used as the name for generic parameters since it is terse.
use std::fmt;
use std::panic::{RefUnwindSafe, UnwindSafe};
pub use shared::{ALL_OPERATIONS, FloatTy, MathOpInfo, Ty};
use crate::{CheckOutput, Float, TupleCall};
// Pull in the macro crate's shared type definitions by textual inclusion — presumably to
// avoid a runtime dependency on the proc-macro crate's internals (TODO: confirm).
mod shared {
    include!("../../libm-macros/src/shared.rs");
}
/// An enum representing each possible symbol name (`sin`, `sinf`, `sinl`, etc).
///
/// Variants plus `as_str` and `base_name` are generated by the `function_enum`
/// attribute macro from `libm_macros`.
#[libm_macros::function_enum(BaseName)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Identifier {}
impl fmt::Display for Identifier {
    /// Display the routine's symbol name (e.g. `sinf`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// The name without any type specifier, e.g. `sin` and `sinf` both become `sin`.
///
/// Variants and `as_str` are generated by the `base_name_enum` attribute macro.
#[libm_macros::base_name_enum]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum BaseName {}
impl fmt::Display for BaseName {
    /// Display the suffix-free function name (e.g. `sin`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// Attributes ascribed to a `libm` routine including signature, type information,
/// and naming.
///
/// Implemented by the unit `Routine` struct generated for each function (see the
/// `do_thing` macro below).
pub trait MathOp {
    /// The float type used for this operation.
    type FTy: Float;

    /// The function type representing the signature in a C library.
    type CFn: Copy;

    /// Arguments passed to the C library function as a tuple. These may include `&mut` return
    /// values.
    type CArgs<'a>
    where
        Self: 'a;

    /// The type returned by C implementations.
    type CRet;

    /// The signature of the Rust function as a `fn(...) -> ...` type.
    type RustFn: Copy + UnwindSafe;

    /// Arguments passed to the Rust library function as a tuple.
    ///
    /// The required `TupleCall` bounds ensure this type can be passed either to the C function or
    /// to the Rust function.
    type RustArgs: Copy
        + TupleCall<Self::RustFn, Output = Self::RustRet>
        + TupleCall<Self::CFn, Output = Self::RustRet>
        + RefUnwindSafe;

    /// Type returned from the Rust function.
    type RustRet: CheckOutput<Self::RustArgs>;

    /// The name of this function, including suffix (e.g. `sin`, `sinf`).
    const IDENTIFIER: Identifier;

    /// The name as a string.
    const NAME: &'static str = Self::IDENTIFIER.as_str();

    /// The name of the function excluding the type suffix, e.g. `sin` and `sinf` are both `sin`.
    const BASE_NAME: BaseName = Self::IDENTIFIER.base_name();

    /// The function in `libm` which can be called.
    const ROUTINE: Self::RustFn;
}
// Shorthand aliases so call sites can name `MathOp` associated types without the verbose
// `<Op as MathOp>::...` qualified syntax.

/// Access the associated `FTy` type from an op (helper to avoid ambiguous associated types).
pub type OpFTy<Op> = <Op as MathOp>::FTy;
/// Access the associated `FTy::Int` type from an op (helper to avoid ambiguous associated types).
pub type OpITy<Op> = <<Op as MathOp>::FTy as Float>::Int;
/// Access the associated `CFn` type from an op (helper to avoid ambiguous associated types).
pub type OpCFn<Op> = <Op as MathOp>::CFn;
/// Access the associated `CRet` type from an op (helper to avoid ambiguous associated types).
pub type OpCRet<Op> = <Op as MathOp>::CRet;
/// Access the associated `RustFn` type from an op (helper to avoid ambiguous associated types).
pub type OpRustFn<Op> = <Op as MathOp>::RustFn;
/// Access the associated `RustArgs` type from an op (helper to avoid ambiguous associated types).
pub type OpRustArgs<Op> = <Op as MathOp>::RustArgs;
/// Access the associated `RustRet` type from an op (helper to avoid ambiguous associated types).
pub type OpRustRet<Op> = <Op as MathOp>::RustRet;
/// Callback for `for_each_function!`: emits one module per routine containing a unit
/// `Routine` struct and its `MathOp` implementation.
macro_rules! do_thing {
    // A single matcher handles every function; all the type fields are supplied by the
    // `emit_types: all` invocation below.
    (
        fn_name: $fn_name:ident,
        FTy: $FTy:ty,
        CFn: $CFn:ty,
        CArgs: $CArgs:ty,
        CRet: $CRet:ty,
        RustFn: $RustFn:ty,
        RustArgs: $RustArgs:ty,
        RustRet: $RustRet:ty,
        attrs: [$($attr:meta),*],
    ) => {
        paste::paste! {
            $(#[$attr])*
            pub mod $fn_name {
                use super::*;

                pub struct Routine;

                impl MathOp for Routine {
                    type FTy = $FTy;
                    type CFn = for<'a> $CFn;
                    type CArgs<'a> = $CArgs where Self: 'a;
                    type CRet = $CRet;
                    type RustFn = $RustFn;
                    type RustArgs = $RustArgs;
                    type RustRet = $RustRet;

                    // e.g. `sinf` -> `Identifier::Sinf`.
                    const IDENTIFIER: Identifier = Identifier::[< $fn_name:camel >];
                    const ROUTINE: Self::RustFn = libm::$fn_name;
                }
            }
        }
    };
}
// Generate a `Routine` module for every function in the `libm` API.
libm_macros::for_each_function! {
    callback: do_thing,
    emit_types: all,
}

View file

@ -0,0 +1,573 @@
//! Configuration for skipping or changing the result for individual test cases (inputs) rather
//! than ignoring entire tests.
use core::f32;
use CheckBasis::{Mpfr, Musl};
use libm::support::CastFrom;
use {BaseName as Bn, Identifier as Id};
use crate::{BaseName, CheckBasis, CheckCtx, Float, Identifier, Int, TestResult};
/// Marker type implementing [`MaybeOverride`] for each input tuple, used to hook
/// per-test-case overrides into the checker.
pub struct SpecialCase;
/// ULP allowed to differ from the results returned by a test basis.
///
/// Starts from a per-base-name default measured against MPFR, then widens it for the
/// musl basis and for x86 targets where our implementations (or musl's) are less accurate.
#[allow(clippy::single_match)]
pub fn default_ulp(ctx: &CheckCtx) -> u32 {
    // ULP compared to the infinite (MPFR) result.
    let mut ulp = match ctx.base_name {
        // Operations that require exact results. This list should correlate with what we
        // have documented at <https://doc.rust-lang.org/std/primitive.f32.html>.
        Bn::Ceil
        | Bn::Copysign
        | Bn::Fabs
        | Bn::Fdim
        | Bn::Floor
        | Bn::Fma
        | Bn::Fmax
        | Bn::Fmaximum
        | Bn::FmaximumNum
        | Bn::Fmin
        | Bn::Fminimum
        | Bn::FminimumNum
        | Bn::Fmod
        | Bn::Frexp
        | Bn::Ilogb
        | Bn::Ldexp
        | Bn::Modf
        | Bn::Nextafter
        | Bn::Remainder
        | Bn::Remquo
        | Bn::Rint
        | Bn::Round
        | Bn::Roundeven
        | Bn::Scalbn
        | Bn::Sqrt
        | Bn::Trunc => 0,

        // Operations that aren't required to be exact, but our implementations are.
        Bn::Cbrt => 0,

        // Bessel functions have large inaccuracies.
        Bn::J0 | Bn::J1 | Bn::Y0 | Bn::Y1 | Bn::Jn | Bn::Yn => 8_000_000,

        // For all other operations, specify our implementation's worst case precision.
        Bn::Acos => 1,
        Bn::Acosh => 4,
        Bn::Asin => 1,
        Bn::Asinh => 2,
        Bn::Atan => 1,
        Bn::Atan2 => 2,
        Bn::Atanh => 2,
        Bn::Cos => 1,
        Bn::Cosh => 1,
        Bn::Erf => 1,
        Bn::Erfc => 4,
        Bn::Exp => 1,
        Bn::Exp10 => 6,
        Bn::Exp2 => 1,
        Bn::Expm1 => 1,
        Bn::Hypot => 1,
        Bn::Lgamma | Bn::LgammaR => 16,
        Bn::Log => 1,
        Bn::Log10 => 1,
        Bn::Log1p => 1,
        Bn::Log2 => 1,
        Bn::Pow => 1,
        Bn::Sin => 1,
        Bn::Sincos => 1,
        Bn::Sinh => 2,
        Bn::Tan => 1,
        Bn::Tanh => 2,
        // tgammaf has higher accuracy than tgamma.
        Bn::Tgamma if ctx.fn_ident != Id::Tgamma => 1,
        Bn::Tgamma => 20,
    };

    // There are some cases where musl's approximation is less accurate than ours. For these
    // cases, increase the ULP.
    if ctx.basis == Musl {
        match ctx.base_name {
            Bn::Cosh => ulp = 2,
            Bn::Exp10 if usize::BITS < 64 => ulp = 4,
            Bn::Lgamma | Bn::LgammaR => ulp = 400,
            Bn::Tanh => ulp = 4,
            _ => (),
        }

        match ctx.fn_ident {
            Id::Cbrt => ulp = 2,
            // FIXME(#401): musl has an incorrect result here.
            Id::Fdim => ulp = 2,
            Id::Sincosf => ulp = 500,
            Id::Tgamma => ulp = 20,
            _ => (),
        }
    }

    if cfg!(target_arch = "x86") {
        match ctx.fn_ident {
            // Input `fma(0.999999999999999, 1.0000000000000013, 0.0) = 1.0000000000000002` is
            // incorrect on i586 and i686.
            Id::Fma => ulp = 1,
            _ => (),
        }
    }

    // In some cases, our implementation is less accurate than musl on i586.
    if cfg!(x86_no_sse) {
        match ctx.fn_ident {
            // FIXME(#401): these need to be correctly rounded but are not.
            Id::Fmaf => ulp = 1,
            Id::Fdim => ulp = 1,
            Id::Round => ulp = 1,

            Id::Asinh => ulp = 3,
            Id::Asinhf => ulp = 3,
            Id::Cbrt => ulp = 1,
            Id::Exp10 | Id::Exp10f => ulp = 1_000_000,
            Id::Exp2 | Id::Exp2f => ulp = 10_000_000,
            Id::Log1p | Id::Log1pf => ulp = 2,
            Id::Tan => ulp = 2,
            _ => (),
        }
    }

    ulp
}
/// Result of checking for possible overrides.
#[derive(Debug, Default)]
pub enum CheckAction {
    /// The check should pass. Default case.
    #[default]
    AssertSuccess,

    /// Override the ULP for this check.
    AssertWithUlp(u32),

    /// Failure is expected, ensure this is the case (xfail). Takes a context string to
    /// help trace back exactly why we expect this to fail.
    AssertFailure(&'static str),

    /// The override somehow validated the result, here it is.
    Custom(TestResult),

    /// Disregard the output.
    Skip,
}
/// Don't run further validation on this test case.
const SKIP: CheckAction = CheckAction::Skip;

/// Return this to skip checks on a test that currently fails but shouldn't. Takes a description
/// of context.
const XFAIL: fn(&'static str) -> CheckAction = CheckAction::AssertFailure;

/// Indicates that we expect a test to fail but we aren't asserting that it does (e.g. some results
/// within a range do actually pass).
///
/// Same as `SKIP`, just indicates we have something to eventually fix.
const XFAIL_NOCHECK: CheckAction = CheckAction::Skip;

/// By default, all tests should pass.
const DEFAULT: CheckAction = CheckAction::AssertSuccess;
/// Allow overriding the outputs of specific test cases.
///
/// There are some cases where we want to xfail specific cases or handle certain inputs
/// differently than the rest of calls to `validate`. This provides a hook to do that.
///
/// Returning [`CheckAction::AssertSuccess`] (the default) means checks proceed as usual;
/// the other variants adjust the ULP for the single case, expect a failure, substitute a
/// custom result, or skip the check entirely.
///
/// This gets implemented once per input type, then the functions provide further filtering
/// based on function name and values.
pub trait MaybeOverride<Input> {
    /// Hook for float-valued results. The default accepts the result unchanged.
    fn check_float<F: Float>(
        _input: Input,
        _actual: F,
        _expected: F,
        _ctx: &CheckCtx,
    ) -> CheckAction {
        DEFAULT
    }

    /// Hook for integer-valued results (e.g. the sign returned by `lgamma_r`).
    fn check_int<I: Int>(_input: Input, _actual: I, _expected: I, _ctx: &CheckCtx) -> CheckAction {
        DEFAULT
    }
}
// No `f16`-specific overrides yet; rely on the trait's default (accept) behavior.
#[cfg(f16_enabled)]
impl MaybeOverride<(f16,)> for SpecialCase {}
impl MaybeOverride<(f32,)> for SpecialCase {
    // Single-argument `f32` overrides; shared cases are delegated to `unop_common`.
    fn check_float<F: Float>(input: (f32,), actual: F, expected: F, ctx: &CheckCtx) -> CheckAction {
        if ctx.base_name == BaseName::Expm1
            && !input.0.is_infinite()
            && input.0 > 80.0
            && actual.is_infinite()
            && !expected.is_infinite()
        {
            // we return infinity but the number is representable
            if ctx.basis == CheckBasis::Musl {
                return XFAIL_NOCHECK;
            }
            return XFAIL("expm1 representable numbers");
        }

        if cfg!(x86_no_sse)
            && ctx.base_name == BaseName::Exp2
            && !expected.is_infinite()
            && actual.is_infinite()
        {
            // We return infinity when there is a representable value. Test input: 127.97238
            return XFAIL("586 exp2 representable numbers");
        }

        if ctx.base_name == BaseName::Sinh && input.0.abs() > 80.0 && actual.is_nan() {
            // we return some NaN that should be real values or infinite
            if ctx.basis == CheckBasis::Musl {
                return XFAIL_NOCHECK;
            }
            return XFAIL("sinh unexpected NaN");
        }

        if (ctx.base_name == BaseName::Lgamma || ctx.base_name == BaseName::LgammaR)
            && input.0 > 4e36
            && expected.is_infinite()
            && !actual.is_infinite()
        {
            // This result should saturate but we return a finite value.
            return XFAIL_NOCHECK;
        }

        if ctx.base_name == BaseName::J0 && input.0 < -1e34 {
            // Errors get huge close to -inf
            return XFAIL_NOCHECK;
        }

        unop_common(input, actual, expected, ctx)
    }

    fn check_int<I: Int>(input: (f32,), actual: I, expected: I, ctx: &CheckCtx) -> CheckAction {
        // On MPFR for lgammaf_r, we set -1 as the integer result for negative infinity but MPFR
        // sets +1
        if ctx.basis == CheckBasis::Mpfr
            && ctx.base_name == BaseName::LgammaR
            && input.0 == f32::NEG_INFINITY
            && actual.abs() == expected.abs()
        {
            return XFAIL("lgammar integer result");
        }

        DEFAULT
    }
}
impl MaybeOverride<(f64,)> for SpecialCase {
    // Single-argument `f64` overrides; most are i586 (x87, no SSE) quirks.
    fn check_float<F: Float>(input: (f64,), actual: F, expected: F, ctx: &CheckCtx) -> CheckAction {
        if cfg!(x86_no_sse)
            && ctx.base_name == BaseName::Ceil
            && ctx.basis == CheckBasis::Musl
            && input.0 < 0.0
            && input.0 > -1.0
            && expected == F::ZERO
            && actual == F::ZERO
        {
            // musl returns -0.0, we return +0.0
            return XFAIL("i586 ceil signed zero");
        }

        if cfg!(x86_no_sse)
            && (ctx.base_name == BaseName::Rint || ctx.base_name == BaseName::Roundeven)
            && (expected - actual).abs() <= F::ONE
            && (expected - actual).abs() > F::ZERO
        {
            // Our rounding mode is incorrect.
            return XFAIL("i586 rint rounding mode");
        }

        if cfg!(x86_no_sse)
            && (ctx.fn_ident == Identifier::Ceil || ctx.fn_ident == Identifier::Floor)
            && expected.eq_repr(F::NEG_ZERO)
            && actual.eq_repr(F::ZERO)
        {
            // FIXME: the x87 implementations do not keep the distinction between -0.0 and 0.0.
            // See https://github.com/rust-lang/libm/pull/404#issuecomment-2572399955
            return XFAIL("i586 ceil/floor signed zero");
        }

        if cfg!(x86_no_sse)
            && (ctx.fn_ident == Identifier::Exp10 || ctx.fn_ident == Identifier::Exp2)
        {
            // FIXME: i586 has very imprecise results with ULP > u32::MAX for these
            // operations so we can't reasonably provide a limit.
            return XFAIL_NOCHECK;
        }

        if ctx.base_name == BaseName::J0 && input.0 < -1e300 {
            // Errors get huge close to -inf
            return XFAIL_NOCHECK;
        }

        // maybe_check_nan_bits(actual, expected, ctx)
        unop_common(input, actual, expected, ctx)
    }

    fn check_int<I: Int>(input: (f64,), actual: I, expected: I, ctx: &CheckCtx) -> CheckAction {
        // On MPFR for lgamma_r, we set -1 as the integer result for negative infinity but MPFR
        // sets +1
        if ctx.basis == CheckBasis::Mpfr
            && ctx.base_name == BaseName::LgammaR
            && input.0 == f64::NEG_INFINITY
            && actual.abs() == expected.abs()
        {
            return XFAIL("lgammar integer result");
        }

        DEFAULT
    }
}
// No `f128`-specific overrides are needed; the default checks apply.
#[cfg(f128_enabled)]
impl MaybeOverride<(f128,)> for SpecialCase {}
// F1 and F2 are always the same type, this is just to please generics
// Special-case handling shared by all unary operations, applied after the per-width
// overrides above. Checks are order-sensitive: the NaN bit-equality check must come after
// the Musl/MPFR early outs.
fn unop_common<F1: Float, F2: Float>(
input: (F1,),
actual: F2,
expected: F2,
ctx: &CheckCtx,
) -> CheckAction {
if ctx.base_name == BaseName::Acosh
&& input.0 < F1::NEG_ONE
&& !(expected.is_nan() && actual.is_nan())
{
// acoshf is undefined for x <= 1.0, but we return a random result at lower values.
if ctx.basis == CheckBasis::Musl {
return XFAIL_NOCHECK;
}
return XFAIL("acoshf undefined");
}
if (ctx.base_name == BaseName::Lgamma || ctx.base_name == BaseName::LgammaR)
&& input.0 < F1::ZERO
&& !input.0.is_infinite()
{
// loggamma should not be defined for x < 0, yet we both return results
return XFAIL_NOCHECK;
}
// fabs and copysign must leave NaNs untouched.
if ctx.base_name == BaseName::Fabs && input.0.is_nan() {
// LLVM currently uses x87 instructions which quieten signalling NaNs to handle the i686
// `extern "C"` `f32`/`f64` return ABI.
// LLVM issue <https://github.com/llvm/llvm-project/issues/66803>
// Rust issue <https://github.com/rust-lang/rust/issues/115567>
if cfg!(target_arch = "x86") && ctx.basis == CheckBasis::Musl && actual.is_nan() {
return XFAIL_NOCHECK;
}
// MPFR only has one NaN bitpattern; allow the default `.is_nan()` checks to validate.
if ctx.basis == CheckBasis::Mpfr {
return DEFAULT;
}
// abs and copysign require signaling NaNs to be propagated, so verify bit equality.
if actual.to_bits() == expected.to_bits() {
return CheckAction::Custom(Ok(()));
} else {
return CheckAction::Custom(Err(anyhow::anyhow!("NaNs have different bitpatterns")));
}
}
DEFAULT
}
// Two-argument overrides: every float width delegates to the shared `binop_common` below.
#[cfg(f16_enabled)]
impl MaybeOverride<(f16, f16)> for SpecialCase {
fn check_float<F: Float>(
input: (f16, f16),
actual: F,
expected: F,
ctx: &CheckCtx,
) -> CheckAction {
binop_common(input, actual, expected, ctx)
}
}
impl MaybeOverride<(f32, f32)> for SpecialCase {
fn check_float<F: Float>(
input: (f32, f32),
actual: F,
expected: F,
ctx: &CheckCtx,
) -> CheckAction {
binop_common(input, actual, expected, ctx)
}
}
impl MaybeOverride<(f64, f64)> for SpecialCase {
fn check_float<F: Float>(
input: (f64, f64),
actual: F,
expected: F,
ctx: &CheckCtx,
) -> CheckAction {
binop_common(input, actual, expected, ctx)
}
}
#[cfg(f128_enabled)]
impl MaybeOverride<(f128, f128)> for SpecialCase {
fn check_float<F: Float>(
input: (f128, f128),
actual: F,
expected: F,
ctx: &CheckCtx,
) -> CheckAction {
binop_common(input, actual, expected, ctx)
}
}
// F1 and F2 are always the same type, this is just to please generics
// Special-case handling shared by all binary operations (currently `fmin`/`fmax`-family and
// `copysign`-style NaN cases). The MPFR skip must run before the signed-zero checks.
fn binop_common<F1: Float, F2: Float>(
input: (F1, F1),
actual: F2,
expected: F2,
ctx: &CheckCtx,
) -> CheckAction {
// MPFR only has one NaN bitpattern; allow the default `.is_nan()` checks to validate. Skip if
// the first input (magnitude source) is NaN and the output is also a NaN, or if the second
// input (sign source) is NaN.
if ctx.basis == CheckBasis::Mpfr
&& ((input.0.is_nan() && actual.is_nan() && expected.is_nan()) || input.1.is_nan())
{
return SKIP;
}
/* FIXME(#439): our fmin and fmax do not compare signed zeros */
if ctx.base_name == BaseName::Fmin
&& input.0.biteq(F1::NEG_ZERO)
&& input.1.biteq(F1::ZERO)
&& expected.biteq(F2::NEG_ZERO)
&& actual.biteq(F2::ZERO)
{
return XFAIL("fmin signed zeroes");
}
if ctx.base_name == BaseName::Fmax
&& input.0.biteq(F1::NEG_ZERO)
&& input.1.biteq(F1::ZERO)
&& expected.biteq(F2::ZERO)
&& actual.biteq(F2::NEG_ZERO)
{
return XFAIL("fmax signed zeroes");
}
// Musl propagates NaNs if one is provided as the input, but we return the other input.
if (ctx.base_name == BaseName::Fmax || ctx.base_name == BaseName::Fmin)
&& ctx.basis == Musl
&& (input.0.is_nan() ^ input.1.is_nan())
&& expected.is_nan()
{
return XFAIL("fmax/fmin musl NaN");
}
DEFAULT
}
// `(i32, float)` overrides for the Bessel functions `jn`/`yn`, which take an order argument.
impl MaybeOverride<(i32, f32)> for SpecialCase {
fn check_float<F: Float>(
input: (i32, f32),
actual: F,
expected: F,
ctx: &CheckCtx,
) -> CheckAction {
// `ynf(213, 109.15641) = -inf` with our library, should be finite.
if ctx.basis == Mpfr
&& ctx.base_name == BaseName::Yn
&& input.0 > 200
&& !expected.is_infinite()
&& actual.is_infinite()
{
return XFAIL("ynf infinity mismatch");
}
int_float_common(input, actual, expected, ctx)
}
}
impl MaybeOverride<(i32, f64)> for SpecialCase {
fn check_float<F: Float>(
input: (i32, f64),
actual: F,
expected: F,
ctx: &CheckCtx,
) -> CheckAction {
// No `f64`-specific cases; only the shared overrides apply.
int_float_common(input, actual, expected, ctx)
}
}
// Special-case handling shared by the `(i32, float)` signatures (`jn`/`yn` and variants).
fn int_float_common<F1: Float, F2: Float>(
input: (i32, F1),
actual: F2,
expected: F2,
ctx: &CheckCtx,
) -> CheckAction {
if ctx.basis == Mpfr
&& (ctx.base_name == BaseName::Jn || ctx.base_name == BaseName::Yn)
&& input.1 == F1::NEG_INFINITY
&& actual == F2::ZERO
&& expected == F2::ZERO
{
return XFAIL("we disagree with MPFR on the sign of zero");
}
// Values near infinity sometimes get cut off for us. `ynf(681, 509.90924) = -inf` but should
// be -3.2161271e38.
if ctx.basis == Musl
&& ctx.fn_ident == Identifier::Ynf
&& !expected.is_infinite()
&& actual.is_infinite()
&& (expected.abs().to_bits().abs_diff(actual.abs().to_bits())
< F2::Int::cast_from(10_000_000u32))
{
return XFAIL_NOCHECK;
}
// Our bessel functions blow up with large N values
if ctx.basis == Musl && (ctx.base_name == BaseName::Jn || ctx.base_name == BaseName::Yn) {
if cfg!(x86_no_sse) {
// Precision is especially bad on i586, not worth checking.
return XFAIL_NOCHECK;
}
// For moderately large orders, loosen the ULP limit rather than skipping entirely.
if input.0 > 4000 {
return XFAIL_NOCHECK;
} else if input.0 > 100 {
return CheckAction::AssertWithUlp(1_000_000);
}
}
DEFAULT
}
// The remaining signatures (`scalbn`/`ldexp`-style `(float, i32)` and ternary `fma`-style)
// need no special cases; the empty impls fall back to the trait's default behavior.
#[cfg(f16_enabled)]
impl MaybeOverride<(f16, i32)> for SpecialCase {}
impl MaybeOverride<(f32, i32)> for SpecialCase {}
impl MaybeOverride<(f64, i32)> for SpecialCase {}
#[cfg(f128_enabled)]
impl MaybeOverride<(f128, i32)> for SpecialCase {}
impl MaybeOverride<(f32, f32, f32)> for SpecialCase {}
impl MaybeOverride<(f64, f64, f64)> for SpecialCase {}
#[cfg(f128_enabled)]
impl MaybeOverride<(f128, f128, f128)> for SpecialCase {}

View file

@ -0,0 +1,370 @@
//! Configuration for how tests get run.
use std::ops::RangeInclusive;
use std::sync::LazyLock;
use std::{env, str};
use crate::generate::random::{SEED, SEED_ENV};
use crate::{BaseName, FloatTy, Identifier, test_log};
/// The environment variable indicating which extensive tests should be run.
pub const EXTENSIVE_ENV: &str = "LIBM_EXTENSIVE_TESTS";
/// Specify the number of iterations via this environment variable, rather than using the default.
pub const EXTENSIVE_ITER_ENV: &str = "LIBM_EXTENSIVE_ITERATIONS";
/// The override value, if set by the above environment.
/// The override value, if set by the above environment.
///
/// Panics at first use if the variable is set but does not parse as a `u64`.
static EXTENSIVE_ITER_OVERRIDE: LazyLock<Option<u64>> = LazyLock::new(|| {
    match env::var(EXTENSIVE_ITER_ENV) {
        Ok(val) => Some(val.parse().expect("failed to parse iteration count")),
        Err(_) => None,
    }
});
/// Specific tests that need to have a reduced amount of iterations to complete in a reasonable
/// amount of time.
///
/// Contains the identifier+generator combo to match on, plus the factor to reduce by.
// NOTE(review): the const name is misspelled ("EXTEMELY" -> "EXTREMELY"); renaming requires
// updating its use in `iteration_count` as well.
const EXTEMELY_SLOW_TESTS: &[(Identifier, GeneratorKind, u64)] = &[
(Identifier::Fmodf128, GeneratorKind::QuickSpaced, 50),
(Identifier::Fmodf128, GeneratorKind::Extensive, 50),
];
/// Maximum number of iterations to run for a single routine.
///
/// The default value of one greater than `u32::MAX` allows testing single-argument `f32` routines
/// and single- or double-argument `f16` routines exhaustively. `f64` and `f128` can't feasibly
/// be tested exhaustively; however, [`EXTENSIVE_ITER_ENV`] can be set to run tests for multiple
/// hours.
pub fn extensive_max_iterations() -> u64 {
    // 1 << 32 == u32::MAX + 1
    const DEFAULT: u64 = 1 << 32;
    EXTENSIVE_ITER_OVERRIDE.unwrap_or(DEFAULT)
}
/// Context passed to [`CheckOutput`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CheckCtx {
/// Allowed ULP deviation
pub ulp: u32,
/// Identifier of the exact routine under test (e.g. a specific width variant).
pub fn_ident: Identifier,
/// The function name with any type suffix stripped, derived from `fn_ident`.
pub base_name: BaseName,
/// Function name.
pub fn_name: &'static str,
/// Return the unsuffixed version of the function name.
pub base_name_str: &'static str,
/// Source of truth for tests.
pub basis: CheckBasis,
/// Which input-generation strategy this check runs under.
pub gen_kind: GeneratorKind,
/// If specified, this value will override the value returned by [`iteration_count`].
pub override_iterations: Option<u64>,
}
impl CheckCtx {
/// Create a new check context, using the default ULP for the function.
pub fn new(fn_ident: Identifier, basis: CheckBasis, gen_kind: GeneratorKind) -> Self {
// Build with a placeholder ULP first since `default_ulp` needs the rest of the context.
let mut ret = Self {
ulp: 0,
fn_ident,
fn_name: fn_ident.as_str(),
base_name: fn_ident.base_name(),
base_name_str: fn_ident.base_name().as_str(),
basis,
gen_kind,
override_iterations: None,
};
ret.ulp = crate::default_ulp(&ret);
ret
}
/// The number of input arguments for this function.
pub fn input_count(&self) -> usize {
self.fn_ident.math_op().rust_sig.args.len()
}
/// Force a specific iteration count instead of the computed one.
pub fn override_iterations(&mut self, count: u64) {
self.override_iterations = Some(count)
}
}
/// Possible items to test against
// `Copy` added for consistency with `GeneratorKind`; the enum is fieldless so the derive is
// free and backward compatible.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CheckBasis {
    /// Check against Musl's math sources.
    Musl,
    /// Check against infinite precision (MPFR).
    Mpfr,
    /// Benchmarks or other times when this is not relevant.
    None,
}
/// The different kinds of generators that provide test input, which account for input pattern
/// and quantity.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum GeneratorKind {
/// Points of interest (asymptotes, check points) plus surrounding values.
EdgeCases,
/// Very large iteration counts, gated by [`EXTENSIVE_ENV`].
Extensive,
/// A quick pass of spaced values across the domain.
QuickSpaced,
/// Randomly generated inputs (seeded; see `SEED_ENV`).
Random,
/// An explicit list of test cases.
List,
}
/// A list of all functions that should get extensive tests.
///
/// This also supports the special test name `all` to run all tests, as well as `all_f16`,
/// `all_f32`, `all_f64`, and `all_f128` to run all tests for a specific float type.
static EXTENSIVE: LazyLock<Vec<Identifier>> = LazyLock::new(|| {
    let env_value = env::var(EXTENSIVE_ENV).unwrap_or_default();
    let mut selected = Vec::new();

    // Add every identifier whose operation is implemented for float type `fty`.
    let push_all_of_ty = |dst: &mut Vec<_>, fty: FloatTy| {
        dst.extend(Identifier::ALL.iter().copied().filter(move |id| id.math_op().float_ty == fty));
    };

    for name in env_value.split(',').filter(|s| !s.is_empty()) {
        match name {
            "all" => selected = Identifier::ALL.to_owned(),
            "all_f16" => push_all_of_ty(&mut selected, FloatTy::F16),
            "all_f32" => push_all_of_ty(&mut selected, FloatTy::F32),
            "all_f64" => push_all_of_ty(&mut selected, FloatTy::F64),
            "all_f128" => push_all_of_ty(&mut selected, FloatTy::F128),
            other => {
                let id = Identifier::from_str(other)
                    .unwrap_or_else(|| panic!("unrecognized test name `{other}`"));
                selected.push(id);
            }
        }
    }

    selected
});
/// Information about the function to be tested.
///
/// Computed once per check via [`TestEnv::from_env`] and used to scale iteration counts.
#[derive(Debug)]
struct TestEnv {
/// Tests should be reduced because the platform is slow. E.g. 32-bit or emulated.
slow_platform: bool,
/// The float cannot be tested exhaustively, `f64` or `f128`.
large_float_ty: bool,
/// Env indicates that an extensive test should be run.
should_run_extensive: bool,
/// Multiprecision tests will be run.
mp_tests_enabled: bool,
/// The number of inputs to the function.
input_count: usize,
}
impl TestEnv {
    /// Gather platform, float-type, and environment information for the function in `ctx`.
    fn from_env(ctx: &CheckCtx) -> Self {
        let op = ctx.fn_ident.math_op();
        Self {
            slow_platform: slow_platform(),
            // `f64`/`f128` domains are far too large to cover exhaustively.
            large_float_ty: matches!(op.float_ty, FloatTy::F64 | FloatTy::F128),
            should_run_extensive: EXTENSIVE.contains(&ctx.fn_ident),
            // MPFR comparisons only happen when the crate is built with that feature.
            mp_tests_enabled: cfg!(feature = "build-mpfr"),
            input_count: op.rust_sig.args.len(),
        }
    }
}
/// Tests are pretty slow on non-64-bit targets, x86 MacOS, and targets that run in QEMU. Start
/// with a reduced number on these platforms.
fn slow_platform() -> bool {
    let reduced_hardware = crate::emulated()
        || usize::BITS < 64
        || cfg!(all(target_arch = "x86_64", target_vendor = "apple"));
    // If not running in CI, there is no need to reduce iteration count.
    reduced_hardware && crate::ci()
}
/// The number of iterations to run for a given test.
///
/// Starts from a target iteration budget, scales it by platform speed, float width, argument
/// count, and per-function overrides, then returns the per-argument count (the nth root of
/// the total, so the cartesian product over all arguments stays near the budget).
pub fn iteration_count(ctx: &CheckCtx, argnum: usize) -> u64 {
    let t_env = TestEnv::from_env(ctx);

    // Ideally run 5M tests
    let mut domain_iter_count: u64 = 4_000_000;

    // Start with a reduced number of tests on slow platforms.
    if t_env.slow_platform {
        domain_iter_count = 100_000;
    }

    // If we will be running tests against MPFR, we don't need to test as much against musl.
    // However, there are some platforms where we have to test against musl since MPFR can't be
    // built.
    if t_env.mp_tests_enabled && ctx.basis == CheckBasis::Musl {
        domain_iter_count /= 100;
    }

    // Run fewer random tests than domain tests.
    let random_iter_count = domain_iter_count / 100;

    let mut total_iterations = match ctx.gen_kind {
        GeneratorKind::QuickSpaced => domain_iter_count,
        GeneratorKind::Random => random_iter_count,
        GeneratorKind::Extensive => extensive_max_iterations(),
        GeneratorKind::EdgeCases | GeneratorKind::List => {
            unimplemented!("shouldn't need `iteration_count` for {:?}", ctx.gen_kind)
        }
    };

    // Larger float types get more iterations. Extensive mode is excluded since it already has
    // a very high iteration count. (A previous inner branch testing for `Extensive` here was
    // unreachable and has been removed; behavior is unchanged.)
    if t_env.large_float_ty && ctx.gen_kind != GeneratorKind::Extensive {
        total_iterations *= 4;
    }

    // Functions with more arguments get more iterations.
    let arg_multiplier = 1 << (t_env.input_count - 1);
    total_iterations *= arg_multiplier;

    // FMA has a huge domain but is reasonably fast to run, so increase another 1.5x.
    if ctx.base_name == BaseName::Fma {
        total_iterations = 3 * total_iterations / 2;
    }

    // Some tests are significantly slower than others and need to be further reduced.
    if let Some((_id, _gen, scale)) = EXTEMELY_SLOW_TESTS
        .iter()
        .find(|(id, generator, _scale)| *id == ctx.fn_ident && *generator == ctx.gen_kind)
    {
        // However, do not override if the extensive iteration count has been manually set.
        if !(ctx.gen_kind == GeneratorKind::Extensive && EXTENSIVE_ITER_OVERRIDE.is_some()) {
            total_iterations /= scale;
        }
    }

    if cfg!(optimizations_enabled) {
        // Always run at least 10,000 tests.
        total_iterations = total_iterations.max(10_000);
    } else {
        // Without optimizations, just run a quick check regardless of other parameters.
        total_iterations = 800;
    }

    let mut overridden = false;
    if let Some(count) = ctx.override_iterations {
        total_iterations = count;
        overridden = true;
    }

    // Adjust for the number of inputs: take the nth root so the product over all arguments
    // approximates `total_iterations`.
    let ntests = match t_env.input_count {
        1 => total_iterations,
        2 => (total_iterations as f64).sqrt().ceil() as u64,
        3 => (total_iterations as f64).cbrt().ceil() as u64,
        _ => panic!("test has more than three arguments"),
    };
    let total = ntests.pow(t_env.input_count.try_into().unwrap());

    // Random tests note the seed so failures can be reproduced.
    let seed_msg = match ctx.gen_kind {
        GeneratorKind::QuickSpaced | GeneratorKind::Extensive => String::new(),
        GeneratorKind::Random => {
            format!(" using `{SEED_ENV}={}`", str::from_utf8(SEED.as_slice()).unwrap())
        }
        GeneratorKind::EdgeCases | GeneratorKind::List => unimplemented!(),
    };

    test_log(&format!(
        "{gen_kind:?} {basis:?} {fn_ident} arg {arg}/{args}: {ntests} iterations \
         ({total} total){seed_msg}{omsg}",
        gen_kind = ctx.gen_kind,
        basis = ctx.basis,
        fn_ident = ctx.fn_ident,
        arg = argnum + 1,
        args = t_env.input_count,
        omsg = if overridden { " (overridden)" } else { "" }
    ));

    ntests
}
/// Some tests require that an integer be kept within reasonable limits; generate that here.
///
/// Only the Bessel functions (`jn`/`yn`), whose first argument is an iteration count, are
/// limited; all other functions get the full `i32` range.
pub fn int_range(ctx: &CheckCtx, argnum: usize) -> RangeInclusive<i32> {
    let t_env = TestEnv::from_env(ctx);

    if !matches!(ctx.base_name, BaseName::Jn | BaseName::Yn) {
        return i32::MIN..=i32::MAX;
    }

    assert_eq!(argnum, 0, "For `jn`/`yn`, only the first argument takes an integer");

    // The integer argument to `jn` is an iteration count. Limit this to ensure tests can be
    // completed in a reasonable amount of time.
    let non_extensive_range = if t_env.slow_platform || !cfg!(optimizations_enabled) {
        (-0xf)..=0xff
    } else {
        (-0xff)..=0xffff
    };

    let extensive_range = (-0xfff)..=0xfffff;

    match ctx.gen_kind {
        GeneratorKind::Extensive => extensive_range,
        GeneratorKind::QuickSpaced | GeneratorKind::Random => non_extensive_range,
        GeneratorKind::EdgeCases => extensive_range,
        // Fixed typo in the panic message ("shoudn't" -> "shouldn't").
        GeneratorKind::List => unimplemented!("shouldn't need range for {:?}", ctx.gen_kind),
    }
}
/// For domain tests, limit how many asymptotes or specified check points we test.
pub fn check_point_count(ctx: &CheckCtx) -> usize {
    assert_eq!(
        ctx.gen_kind,
        GeneratorKind::EdgeCases,
        "check_point_count is intended for edge case tests"
    );
    // Use a smaller sample on slow platforms or unoptimized builds.
    let reduced = TestEnv::from_env(ctx).slow_platform || !cfg!(optimizations_enabled);
    if reduced { 4 } else { 10 }
}
/// When validating points of interest (e.g. asymptotes, inflection points, extremes), also check
/// this many surrounding values.
pub fn check_near_count(ctx: &CheckCtx) -> u64 {
    assert_eq!(
        ctx.gen_kind,
        GeneratorKind::EdgeCases,
        "check_near_count is intended for edge case tests"
    );
    if !cfg!(optimizations_enabled) {
        // Unoptimized builds only get a small fixed count.
        return 8;
    }
    // Taper based on the number of inputs.
    match ctx.input_count() {
        1 | 2 => 100,
        3 => 50,
        x => panic!("unexpected argument count {x}"),
    }
}
/// Check whether extensive actions should be run or skipped.
///
/// Returns `true` (skip) unless the function was selected via the extensive-tests environment.
pub fn skip_extensive_test(ctx: &CheckCtx) -> bool {
    !TestEnv::from_env(ctx).should_run_extensive
}
/// The number of iterations to run for `u256` fuzz tests.
pub fn bigint_fuzz_iteration_count() -> u64 {
    // Unoptimized builds only get a quick smoke test.
    if cfg!(optimizations_enabled) {
        if slow_platform() { 100_000 } else { 5_000_000 }
    } else {
        1000
    }
}

View file

@ -0,0 +1,447 @@
//! Traits related to testing.
//!
//! There are two main traits in this module:
//!
//! - `TupleCall`: implemented on tuples to allow calling them as function arguments.
//! - `CheckOutput`: implemented on anything that is an output type for validation against an
//! expected value.
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::{fmt, panic};
use anyhow::{Context, anyhow, bail, ensure};
use libm::support::Hexf;
use crate::precision::CheckAction;
use crate::{
CheckBasis, CheckCtx, Float, GeneratorKind, Int, MaybeOverride, SpecialCase, TestResult,
};
/// Trait for calling a function with a tuple as arguments.
///
/// Implemented on the tuple with the function signature as the generic (so we can use the same
/// tuple for multiple signatures).
pub trait TupleCall<Func>: fmt::Debug {
/// The function's return value (possibly combined with out-parameters).
type Output;
fn call(self, f: Func) -> Self::Output;
/// Intercept panics and print the input to stderr before continuing.
fn call_intercept_panics(self, f: Func) -> Self::Output
where
Self: RefUnwindSafe + Copy,
Func: UnwindSafe,
{
let res = panic::catch_unwind(|| self.call(f));
match res {
Ok(v) => v,
Err(e) => {
eprintln!("panic with the following input: {self:?}");
// Re-raise with the original payload so the panic still propagates.
panic::resume_unwind(e)
}
}
}
}
/// A trait to implement on any output type so we can verify it in a generic way.
pub trait CheckOutput<Input>: Sized {
/// Validate `self` (actual) and `expected` are the same.
///
/// `input` is only used here for error messages.
fn validate(self, expected: Self, input: Input, ctx: &CheckCtx) -> TestResult;
}
/// A helper trait to print something as hex with the correct number of nibbles, e.g. a `u32`
/// will always print with `0x` followed by 8 digits.
///
/// This is only used for printing errors so allocating is okay.
pub trait Hex: Copy {
/// Hex integer syntax.
fn hex(self) -> String;
/// Hex float syntax.
fn hexf(self) -> String;
}
/* implement `TupleCall` */
// Plain value-returning signatures map a tuple of N args onto an N-ary function pointer.
impl<T1, R> TupleCall<fn(T1) -> R> for (T1,)
where
T1: fmt::Debug,
{
type Output = R;
fn call(self, f: fn(T1) -> R) -> Self::Output {
f(self.0)
}
}
impl<T1, T2, R> TupleCall<fn(T1, T2) -> R> for (T1, T2)
where
T1: fmt::Debug,
T2: fmt::Debug,
{
type Output = R;
fn call(self, f: fn(T1, T2) -> R) -> Self::Output {
f(self.0, self.1)
}
}
// Out-parameter variant (e.g. `frexp`-style): the `&mut` slot is created here from
// `Default` and returned alongside the return value.
impl<T1, T2, R> TupleCall<fn(T1, &mut T2) -> R> for (T1,)
where
T1: fmt::Debug,
T2: fmt::Debug + Default,
{
type Output = (R, T2);
fn call(self, f: fn(T1, &mut T2) -> R) -> Self::Output {
let mut t2 = T2::default();
(f(self.0, &mut t2), t2)
}
}
impl<T1, T2, T3, R> TupleCall<fn(T1, T2, T3) -> R> for (T1, T2, T3)
where
T1: fmt::Debug,
T2: fmt::Debug,
T3: fmt::Debug,
{
type Output = R;
fn call(self, f: fn(T1, T2, T3) -> R) -> Self::Output {
f(self.0, self.1, self.2)
}
}
// Two inputs plus one out-parameter (e.g. `remquo`-style).
impl<T1, T2, T3, R> TupleCall<fn(T1, T2, &mut T3) -> R> for (T1, T2)
where
T1: fmt::Debug,
T2: fmt::Debug,
T3: fmt::Debug + Default,
{
type Output = (R, T3);
fn call(self, f: fn(T1, T2, &mut T3) -> R) -> Self::Output {
let mut t3 = T3::default();
(f(self.0, self.1, &mut t3), t3)
}
}
// One input and two out-parameters with no return value (e.g. `sincos`-style).
impl<T1, T2, T3> TupleCall<for<'a> fn(T1, &'a mut T2, &'a mut T3)> for (T1,)
where
T1: fmt::Debug,
T2: fmt::Debug + Default,
T3: fmt::Debug + Default,
{
type Output = (T2, T3);
fn call(self, f: for<'a> fn(T1, &'a mut T2, &'a mut T3)) -> Self::Output {
let mut t2 = T2::default();
let mut t3 = T3::default();
f(self.0, &mut t2, &mut t3);
(t2, t3)
}
}
/* implement `Hex` */
// Tuples format as parenthesized lists of their elements' hex representations.
impl<T1> Hex for (T1,)
where
T1: Hex,
{
fn hex(self) -> String {
format!("({},)", self.0.hex())
}
fn hexf(self) -> String {
format!("({},)", self.0.hexf())
}
}
impl<T1, T2> Hex for (T1, T2)
where
T1: Hex,
T2: Hex,
{
fn hex(self) -> String {
format!("({}, {})", self.0.hex(), self.1.hex())
}
fn hexf(self) -> String {
format!("({}, {})", self.0.hexf(), self.1.hexf())
}
}
impl<T1, T2, T3> Hex for (T1, T2, T3)
where
T1: Hex,
T2: Hex,
T3: Hex,
{
fn hex(self) -> String {
format!("({}, {}, {})", self.0.hex(), self.1.hex(), self.2.hex())
}
fn hexf(self) -> String {
format!("({}, {}, {})", self.0.hexf(), self.1.hexf(), self.2.hexf())
}
}
/* trait implementations for ints */
// Implements `Hex` and `CheckOutput` for integer types; validation defers to `validate_int`.
macro_rules! impl_int {
($($ty:ty),*) => {
$(
impl Hex for $ty {
fn hex(self) -> String {
// Width is one nibble per 4 bits plus 2 for the `0x` prefix.
format!("{self:#0width$x}", width = ((Self::BITS / 4) + 2) as usize)
}
fn hexf(self) -> String {
// Integers have no hex-float form.
String::new()
}
}
impl<Input> $crate::CheckOutput<Input> for $ty
where
Input: Hex + fmt::Debug,
SpecialCase: MaybeOverride<Input>,
{
fn validate<'a>(
self,
expected: Self,
input: Input,
ctx: &$crate::CheckCtx,
) -> TestResult {
validate_int(self, expected, input, ctx)
}
}
)*
};
}
// Compare an integer result against its expected value, honoring any `SpecialCase`
// override (XFAIL, skip, or custom result). Integer outputs must match exactly; there is
// no ULP tolerance.
fn validate_int<I, Input>(actual: I, expected: I, input: Input, ctx: &CheckCtx) -> TestResult
where
I: Int + Hex,
Input: Hex + fmt::Debug,
SpecialCase: MaybeOverride<Input>,
{
let (result, xfail_msg) = match SpecialCase::check_int(input, actual, expected, ctx) {
// `require_biteq` forbids overrides.
_ if ctx.gen_kind == GeneratorKind::List => (actual == expected, None),
CheckAction::AssertSuccess => (actual == expected, None),
// For an expected failure, the check passes only while the values still differ.
CheckAction::AssertFailure(msg) => (actual != expected, Some(msg)),
CheckAction::Custom(res) => return res,
CheckAction::Skip => return Ok(()),
CheckAction::AssertWithUlp(_) => panic!("ulp has no meaning for integer checks"),
};
let make_xfail_msg = || match xfail_msg {
Some(m) => format!(
"expected failure but test passed. Does an XFAIL need to be updated?\n\
failed at: {m}",
),
None => String::new(),
};
anyhow::ensure!(
result,
"\
\n input: {input:?} {ibits}\
\n expected: {expected:<22?} {expbits}\
\n actual: {actual:<22?} {actbits}\
\n {msg}\
",
actbits = actual.hex(),
expbits = expected.hex(),
ibits = input.hex(),
msg = make_xfail_msg()
);
Ok(())
}
impl_int!(u32, i32, u64, i64);
/* trait implementations for floats */
// Implements `Hex` and `CheckOutput` for float types; validation defers to `validate_float`.
macro_rules! impl_float {
($($ty:ty),*) => {
$(
impl Hex for $ty {
fn hex(self) -> String {
// Print the raw bit pattern, one nibble per 4 bits plus the `0x` prefix.
format!(
"{:#0width$x}",
self.to_bits(),
width = ((Self::BITS / 4) + 2) as usize
)
}
fn hexf(self) -> String {
format!("{}", Hexf(self))
}
}
impl<Input> $crate::CheckOutput<Input> for $ty
where
Input: Hex + fmt::Debug,
SpecialCase: MaybeOverride<Input>,
{
fn validate<'a>(
self,
expected: Self,
input: Input,
ctx: &$crate::CheckCtx,
) -> TestResult {
validate_float(self, expected, input, ctx)
}
}
)*
};
}
// Compare a float result against its expected value: apply `SpecialCase` overrides, then
// NaN handling, sign/infinity agreement, and finally a ULP-distance check against the
// context's allowance. XFAIL overrides invert the pass/fail result at the end.
fn validate_float<F, Input>(actual: F, expected: F, input: Input, ctx: &CheckCtx) -> TestResult
where
F: Float + Hex,
Input: Hex + fmt::Debug,
u32: TryFrom<F::SignedInt, Error: fmt::Debug>,
SpecialCase: MaybeOverride<Input>,
{
let mut assert_failure_msg = None;
// Create a wrapper function so we only need to `.with_context` once.
let mut inner = || -> TestResult {
let mut allowed_ulp = ctx.ulp;
// Forbid overrides if the items came from an explicit list, as long as we are checking
// against either MPFR or the result itself.
let require_biteq = ctx.gen_kind == GeneratorKind::List && ctx.basis != CheckBasis::Musl;
match SpecialCase::check_float(input, actual, expected, ctx) {
_ if require_biteq => (),
CheckAction::AssertSuccess => (),
CheckAction::AssertFailure(msg) => assert_failure_msg = Some(msg),
CheckAction::Custom(res) => return res,
CheckAction::Skip => return Ok(()),
CheckAction::AssertWithUlp(ulp_override) => allowed_ulp = ulp_override,
};
// Check when both are NaNs
if actual.is_nan() && expected.is_nan() {
if require_biteq && ctx.basis == CheckBasis::None {
ensure!(actual.to_bits() == expected.to_bits(), "mismatched NaN bitpatterns");
}
// By default, NaNs have nothing special to check.
return Ok(());
} else if actual.is_nan() || expected.is_nan() {
// Check when only one is a NaN
bail!("real value != NaN")
}
// Make sure that the signs are the same before checking ULP to avoid wraparound
let act_sig = actual.signum();
let exp_sig = expected.signum();
ensure!(act_sig == exp_sig, "mismatched signs {act_sig:?} {exp_sig:?}");
if actual.is_infinite() ^ expected.is_infinite() {
bail!("mismatched infinities");
}
// ULP distance is the difference between the values' integer bit representations.
let act_bits = actual.to_bits().signed();
let exp_bits = expected.to_bits().signed();
let ulp_diff = act_bits.checked_sub(exp_bits).unwrap().abs();
let ulp_u32 = u32::try_from(ulp_diff)
.map_err(|e| anyhow!("{e:?}: ulp of {ulp_diff} exceeds u32::MAX"))?;
ensure!(ulp_u32 <= allowed_ulp, "ulp {ulp_diff} > {allowed_ulp}",);
Ok(())
};
let mut res = inner();
if let Some(msg) = assert_failure_msg {
// Invert `Ok` and `Err` if the test is an xfail.
if res.is_ok() {
let e = anyhow!(
"expected failure but test passed. Does an XFAIL need to be updated?\n\
failed at: {msg}",
);
res = Err(e)
} else {
res = Ok(())
}
}
res.with_context(|| {
format!(
"\
\n input: {input:?}\
\n as hex: {ihex}\
\n as bits: {ibits}\
\n expected: {expected:<22?} {exphex} {expbits}\
\n actual: {actual:<22?} {acthex} {actbits}\
",
ihex = input.hexf(),
ibits = input.hex(),
exphex = expected.hexf(),
expbits = expected.hex(),
actbits = actual.hex(),
acthex = actual.hexf(),
)
})
}
impl_float!(f32, f64);
#[cfg(f16_enabled)]
impl_float!(f16);
#[cfg(f128_enabled)]
impl_float!(f128);
/* trait implementations for compound types */
/// Implement `CheckOutput` for combinations of types.
///
/// Each field is validated independently; on failure the full input/expected/actual context
/// is attached. The input's bits are reported only on the `as bits:` line (previously they
/// were duplicated inline after `input:`, inconsistent with the scalar validators).
macro_rules! impl_tuples {
    ($(($a:ty, $b:ty);)*) => {
        $(
            impl<Input> CheckOutput<Input> for ($a, $b)
            where
                Input: Hex + fmt::Debug,
                SpecialCase: MaybeOverride<Input>,
            {
                fn validate<'a>(
                    self,
                    expected: Self,
                    input: Input,
                    ctx: &CheckCtx,
                ) -> TestResult {
                    self.0.validate(expected.0, input, ctx)
                        .and_then(|()| self.1.validate(expected.1, input, ctx))
                        .with_context(|| format!(
                            "full context:\
                            \n input: {input:?}\
                            \n as hex: {ihex}\
                            \n as bits: {ibits}\
                            \n expected: {expected:?} {expbits}\
                            \n actual: {self:?} {actbits}\
                            ",
                            ihex = input.hexf(),
                            ibits = input.hex(),
                            expbits = expected.hex(),
                            actbits = self.hex(),
                        ))
                }
            }
        )*
    };
}
impl_tuples!(
    (f32, i32);
    (f64, i32);
    (f32, f32);
    (f64, f64);
);

View file

@ -0,0 +1,61 @@
//! Ensure that `for_each_function!` isn't missing any symbols.
use std::collections::HashSet;
use std::env;
use std::path::Path;
use std::process::Command;
// Callback for `for_each_function!`: records each routine name into the provided set,
// panicking on duplicates.
macro_rules! callback {
(
fn_name: $name:ident,
attrs: [$($attr:meta),*],
extra: [$set:ident],
) => {
let name = stringify!($name);
let new = $set.insert(name);
assert!(new, "duplicate function `{name}` in `ALL_OPERATIONS`");
};
}
#[test]
fn test_for_each_function_all_included() {
    // Parse the checked-in function list, skipping comment lines. Blank lines are now also
    // skipped; previously a blank line would have been collected as a function named ``.
    let all_functions: HashSet<_> = include_str!("../../../etc/function-list.txt")
        .lines()
        .filter(|line| !line.is_empty() && !line.starts_with("#"))
        .collect();

    // Collect every routine the macro knows about; `callback!` rejects duplicates.
    let mut tested = HashSet::new();
    libm_macros::for_each_function! {
        callback: callback,
        extra: [tested],
    };

    let untested = all_functions.difference(&tested);
    if untested.clone().next().is_some() {
        panic!(
            "missing tests for the following: {untested:#?} \
            \nmake sure any new functions are entered in \
            `ALL_OPERATIONS` (in `libm-macros`)."
        );
    }
    assert_eq!(all_functions, tested);
}
#[test]
fn ensure_list_updated() {
    if libm_test::ci() {
        // Most CI tests run in Docker where we don't have Python or Rustdoc, so it's easiest
        // to just run the python file directly when it is available.
        eprintln!("skipping test; CI runs the python file directly");
        return;
    }

    // Run the checked-in update script in `--check` mode and require a clean exit.
    let script = Path::new(env!("CARGO_MANIFEST_DIR")).join("../../etc/update-api-list.py");
    let status = Command::new("python3").arg(script).arg("--check").status().unwrap();
    assert!(status.success(), "May need to run `./etc/update-api-list.py`");
}

View file

@ -0,0 +1,143 @@
//! Compare our implementations with the result of musl functions, as provided by `musl-math-sys`.
//!
//! Currently this only tests randomized inputs. In the future this may be improved to test edge
//! cases or run exhaustive tests.
//!
//! Note that musl functions do not always provide 0.5ULP rounding, so our functions can do better
//! than these results.
// There are some targets we can't build musl for
#![cfg(feature = "build-musl")]
use libm_test::generate::{case_list, edge_cases, random, spaced};
use libm_test::{CheckBasis, CheckCtx, CheckOutput, GeneratorKind, MathOp, TupleCall};
/// Every test in this file compares against musl as the source of truth.
const BASIS: CheckBasis = CheckBasis::Musl;
/// Run each test case through both the musl implementation and ours, validating the result.
fn musl_runner<Op: MathOp>(
    ctx: &CheckCtx,
    cases: impl Iterator<Item = Op::RustArgs>,
    musl_fn: Op::CFn,
) {
    for input in cases {
        // Musl's result is the reference; compute it first, then our routine.
        let reference = input.call(musl_fn);
        let ours = input.call_intercept_panics(Op::ROUTINE);
        ours.validate(reference, input, ctx).unwrap();
    }
}
/// Test against musl with generators from a domain.
// Expands to four `#[test]` functions per routine, one per input generator kind.
macro_rules! musl_tests {
(
fn_name: $fn_name:ident,
attrs: [$($attr:meta),*],
) => {
paste::paste! {
#[test]
$(#[$attr])*
fn [< musl_case_list_ $fn_name >]() {
type Op = libm_test::op::$fn_name::Routine;
let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::List);
let cases = case_list::get_test_cases_basis::<Op>(&ctx).0;
musl_runner::<Op>(&ctx, cases, musl_math_sys::$fn_name);
}
#[test]
$(#[$attr])*
fn [< musl_random_ $fn_name >]() {
type Op = libm_test::op::$fn_name::Routine;
let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::Random);
let cases = random::get_test_cases::<<Op as MathOp>::RustArgs>(&ctx).0;
musl_runner::<Op>(&ctx, cases, musl_math_sys::$fn_name);
}
#[test]
$(#[$attr])*
fn [< musl_edge_case_ $fn_name >]() {
type Op = libm_test::op::$fn_name::Routine;
let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::EdgeCases);
let cases = edge_cases::get_test_cases::<Op>(&ctx).0;
musl_runner::<Op>(&ctx, cases, musl_math_sys::$fn_name);
}
#[test]
$(#[$attr])*
fn [< musl_quickspace_ $fn_name >]() {
type Op = libm_test::op::$fn_name::Routine;
let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::QuickSpaced);
let cases = spaced::get_test_cases::<Op>(&ctx).0;
musl_runner::<Op>(&ctx, cases, musl_math_sys::$fn_name);
}
}
};
}
// Instantiate the musl comparison tests for every routine, minus those musl can't test.
libm_macros::for_each_function! {
callback: musl_tests,
attributes: [],
skip: [
// TODO integer inputs
jn,
jnf,
ldexp,
ldexpf,
scalbn,
scalbnf,
yn,
ynf,
// Not provided by musl
// verify-sorted-start
ceilf128,
ceilf16,
copysignf128,
copysignf16,
fabsf128,
fabsf16,
fdimf128,
fdimf16,
floorf128,
floorf16,
fmaf128,
fmaxf128,
fmaxf16,
fmaximum,
fmaximum_num,
fmaximum_numf,
fmaximum_numf128,
fmaximum_numf16,
fmaximumf,
fmaximumf128,
fmaximumf16,
fminf128,
fminf16,
fminimum,
fminimum_num,
fminimum_numf,
fminimum_numf128,
fminimum_numf16,
fminimumf,
fminimumf128,
fminimumf16,
fmodf128,
fmodf16,
ldexpf128,
ldexpf16,
rintf128,
rintf16,
roundeven,
roundevenf,
roundevenf128,
roundevenf16,
roundf128,
roundf16,
scalbnf128,
scalbnf16,
sqrtf128,
sqrtf16,
truncf128,
truncf16,
// verify-sorted-end
],
}

View file

@ -0,0 +1,79 @@
//! Test with "infinite precision"
#![cfg(feature = "build-mpfr")]
use libm_test::generate::{case_list, edge_cases, random, spaced};
use libm_test::mpfloat::MpOp;
use libm_test::{CheckBasis, CheckCtx, CheckOutput, GeneratorKind, MathOp, TupleCall};
// All tests in this file compare our routines against MPFR results.
const BASIS: CheckBasis = CheckBasis::Mpfr;
/// Check every generated case for `Op` against the MPFR ("infinite precision") result,
/// panicking on the first mismatch. MPFR working state is allocated once and reused.
fn mp_runner<Op: MathOp + MpOp>(ctx: &CheckCtx, cases: impl Iterator<Item = Op::RustArgs>) {
    let mut mp_state = Op::new_mp();
    cases.for_each(|input| {
        let reference = Op::run(&mut mp_state, input);
        let under_test = input.call_intercept_panics(Op::ROUTINE);
        under_test.validate(reference, input, ctx).unwrap();
    });
}
/// Generate multiprecision tests for one function.
///
/// Expands to four `#[test]`s (case list, random, edge cases, quick spaced sweep),
/// each validating our routine against MPFR via `mp_runner`.
macro_rules! mp_tests {
    (
        fn_name: $fn_name:ident,
        attrs: [$($attr:meta),*],
    ) => {
        paste::paste! {
            // Hard-coded cases from the shared case list.
            #[test]
            $(#[$attr])*
            fn [< mp_case_list_ $fn_name >]() {
                type Op = libm_test::op::$fn_name::Routine;
                let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::List);
                let cases = case_list::get_test_cases_basis::<Op>(&ctx).0;
                mp_runner::<Op>(&ctx, cases);
            }

            // Randomly generated inputs.
            #[test]
            $(#[$attr])*
            fn [< mp_random_ $fn_name >]() {
                type Op = libm_test::op::$fn_name::Routine;
                let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::Random);
                let cases = random::get_test_cases::<<Op as MathOp>::RustArgs>(&ctx).0;
                mp_runner::<Op>(&ctx, cases);
            }

            // Inputs from the `edge_cases` generator.
            #[test]
            $(#[$attr])*
            fn [< mp_edge_case_ $fn_name >]() {
                type Op = libm_test::op::$fn_name::Routine;
                let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::EdgeCases);
                let cases = edge_cases::get_test_cases::<Op>(&ctx).0;
                mp_runner::<Op>(&ctx, cases);
            }

            // A quick, spaced sweep across the input domain.
            #[test]
            $(#[$attr])*
            fn [< mp_quickspace_ $fn_name >]() {
                type Op = libm_test::op::$fn_name::Routine;
                let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::QuickSpaced);
                let cases = spaced::get_test_cases::<Op>(&ctx).0;
                mp_runner::<Op>(&ctx, cases);
            }
        }
    };
}
// Instantiate `mp_tests!` for every libm function.
libm_macros::for_each_function! {
    callback: mp_tests,
    attributes: [
        // Also an assertion failure on i686: at `MPFR_ASSERTN (! mpfr_erangeflag_p ())`
        #[ignore = "large values are infeasible in MPFR"]
        [jn, jnf, yn, ynf],
    ],
    skip: [
        // FIXME: test needed, see
        // https://github.com/rust-lang/libm/pull/311#discussion_r1818273392
        nextafter,
        nextafterf,
    ],
}

View file

@ -0,0 +1,38 @@
//! Test cases that have both an input and an output, so do not require a basis.
use libm_test::generate::case_list;
use libm_test::{CheckBasis, CheckCtx, CheckOutput, GeneratorKind, MathOp, TupleCall};
// These cases carry their own expected outputs, so no external comparison basis is used.
const BASIS: CheckBasis = CheckBasis::None;
/// Validate `Op` against cases that are paired with their own expected output,
/// panicking on the first mismatch.
fn standalone_runner<Op: MathOp>(
    ctx: &CheckCtx,
    cases: impl Iterator<Item = (Op::RustArgs, Op::RustRet)>,
) {
    cases.for_each(|(args, expected)| {
        let actual = args.call_intercept_panics(Op::ROUTINE);
        actual.validate(expected, args, ctx).unwrap();
    });
}
/// Generate one test per function checking the hard-coded input/output pairs from the
/// case list against our routines.
///
/// Renamed from `mp_tests` (a copy-paste remnant from the multiprecision test file):
/// these tests do not use a multiprecision basis at all.
macro_rules! standalone_tests {
    (
        fn_name: $fn_name:ident,
        attrs: [$($attr:meta),*],
    ) => {
        paste::paste! {
            #[test]
            $(#[$attr])*
            fn [< standalone_ $fn_name >]() {
                type Op = libm_test::op::$fn_name::Routine;
                let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GeneratorKind::List);
                let cases = case_list::get_test_cases_standalone::<Op>(&ctx);
                standalone_runner::<Op>(&ctx, cases);
            }
        }
    };
}

// Instantiate the standalone tests for every libm function.
libm_macros::for_each_function! {
    callback: standalone_tests,
}

View file

@ -0,0 +1,147 @@
//! Test the u256 implementation. the ops already get exercised reasonably well through the `f128`
//! routines, so this only does a few million fuzz iterations against GMP.
#![cfg(feature = "build-mpfr")]
use std::sync::LazyLock;
use libm::support::{HInt, u256};
// GMP integers (via `rug`) serve as the reference bignum implementation.
type BigInt = rug::Integer;
use libm_test::bigint_fuzz_iteration_count;
use libm_test::generate::random::SEED;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use rug::Assign;
use rug::integer::Order;
use rug::ops::NotAssign;
// `u256::MAX` (2^256 - 1) as a GMP integer, built from two all-ones 128-bit digits.
// Used to truncate bignum results so they match our wrapping `u256` semantics.
static BIGINT_U256_MAX: LazyLock<BigInt> =
    LazyLock::new(|| BigInt::from_digits(&[u128::MAX, u128::MAX], Order::Lsf));
/// Format a `u256` as a `0x`-prefixed, zero-padded 64-digit hex string.
/// (Copied from the test module.)
fn hexu(v: u256) -> String {
    let (hi, lo) = (v.hi, v.lo);
    format!("0x{hi:032x}{lo:032x}")
}
/// Produce a random `u256` from two random 128-bit halves (low half drawn first, to
/// keep the RNG stream identical to the original implementation).
fn random_u256(rng: &mut ChaCha8Rng) -> u256 {
    let (lo, hi): (u128, u128) = (rng.random(), rng.random());
    u256 { lo, hi }
}
/// Set `bx` to the value of `x`, passing the two 128-bit halves least-significant first.
fn assign_bigint(bx: &mut BigInt, x: u256) {
    bx.assign_digits(&[x.lo, x.hi], Order::Lsf);
}
/// Convert the bignum back to a `u256`, truncating to 256 bits, and reset `bx` to zero
/// so the same allocation can be reused by the caller.
fn from_bigint(bx: &mut BigInt) -> u256 {
    // Truncate so the result fits into `[u128; 2]`. This makes all ops overflowing.
    *bx &= &*BIGINT_U256_MAX;
    let mut bres = [0u128, 0];
    bx.write_digits(&mut bres, Order::Lsf);
    bx.assign(0);
    u256 { lo: bres[0], hi: bres[1] }
}
/// Compare one `u256` result against the GMP reference, panicking with a hex dump of
/// the operands on mismatch.
///
/// The input formatters are closures so the strings are only built on failure; `y`
/// returns `None` for unary operations. `expected` is consumed (and zeroed) via
/// `from_bigint`.
fn check_one(
    x: impl FnOnce() -> String,
    y: impl FnOnce() -> Option<String>,
    actual: u256,
    expected: &mut BigInt,
) {
    let expected = from_bigint(expected);
    if actual != expected {
        let xmsg = x();
        let ymsg = y().map(|y| format!("y: {y}\n")).unwrap_or_default();
        panic!(
            "Results do not match\n\
             input: {xmsg}\n\
             {ymsg}\
             actual: {}\n\
             expected: {}\
             ",
            hexu(actual),
            hexu(expected),
        )
    }
}
/// Fuzz `u256` bitwise OR against the GMP reference.
#[test]
fn mp_u256_bitor() {
    let mut rng = ChaCha8Rng::from_seed(*SEED);
    let mut bx = BigInt::new();
    let mut by = BigInt::new();

    for _ in 0..bigint_fuzz_iteration_count() {
        let x = random_u256(&mut rng);
        let y = random_u256(&mut rng);
        assign_bigint(&mut bx, x);
        assign_bigint(&mut by, y);
        let actual = x | y;
        bx |= &by;
        check_one(|| hexu(x), || Some(hexu(y)), actual, &mut bx);
    }
}
/// Fuzz `u256` bitwise NOT against the GMP reference (GMP result is truncated back to
/// 256 bits by `from_bigint` inside `check_one`).
#[test]
fn mp_u256_not() {
    let mut rng = ChaCha8Rng::from_seed(*SEED);
    let mut bx = BigInt::new();

    for _ in 0..bigint_fuzz_iteration_count() {
        let x = random_u256(&mut rng);
        assign_bigint(&mut bx, x);
        let actual = !x;
        bx.not_assign();
        check_one(|| hexu(x), || None, actual, &mut bx);
    }
}
/// Fuzz wrapping `u256` addition against the GMP reference.
#[test]
fn mp_u256_add() {
    let mut rng = ChaCha8Rng::from_seed(*SEED);
    let mut bx = BigInt::new();
    let mut by = BigInt::new();

    for _ in 0..bigint_fuzz_iteration_count() {
        let x = random_u256(&mut rng);
        let y = random_u256(&mut rng);
        assign_bigint(&mut bx, x);
        assign_bigint(&mut by, y);
        let actual = x + y;
        bx += &by;
        check_one(|| hexu(x), || Some(hexu(y)), actual, &mut bx);
    }
}
/// Fuzz `u256` right shift against the GMP reference.
#[test]
fn mp_u256_shr() {
    let mut rng = ChaCha8Rng::from_seed(*SEED);
    let mut bx = BigInt::new();

    for _ in 0..bigint_fuzz_iteration_count() {
        let x = random_u256(&mut rng);
        // NOTE(review): `0..255` is exclusive, so a shift of exactly 255 (a valid
        // `u256` shift amount) is never exercised — confirm whether that edge is
        // intentionally skipped.
        let shift: u32 = rng.random_range(0..255);
        assign_bigint(&mut bx, x);
        let actual = x >> shift;
        bx >>= shift;
        check_one(|| hexu(x), || Some(shift.to_string()), actual, &mut bx);
    }
}
/// Fuzz the `u128 -> u256` widening multiply against the GMP reference.
#[test]
fn mp_u256_widen_mul() {
    let mut rng = ChaCha8Rng::from_seed(*SEED);
    let mut bx = BigInt::new();
    let mut by = BigInt::new();

    for _ in 0..bigint_fuzz_iteration_count() {
        let x: u128 = rng.random();
        let y: u128 = rng.random();
        bx.assign(x);
        by.assign(y);
        let actual = x.widen_mul(y);
        bx *= &by;
        check_one(|| format!("{x:#034x}"), || Some(format!("{y:#034x}")), actual, &mut bx);
    }
}

View file

@ -0,0 +1,14 @@
//! `main` is just a wrapper to handle configuration.

// Without MPFR there is no reference to compare against, so the binary is a no-op
// that explains why it is skipping.
#[cfg(not(feature = "build-mpfr"))]
fn main() {
    eprintln!("multiprecision not enabled; skipping extensive tests");
}

#[cfg(feature = "build-mpfr")]
mod run;

// With MPFR available, delegate to the real extensive test runner.
#[cfg(feature = "build-mpfr")]
fn main() {
    run::run();
}

View file

@ -0,0 +1,233 @@
//! Exhaustive tests for `f16` and `f32`, high-iteration for `f64` and `f128`.
use std::fmt;
use std::io::{self, IsTerminal};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
use indicatif::{ProgressBar, ProgressStyle};
use libm_test::generate::spaced;
use libm_test::mpfloat::MpOp;
use libm_test::{
CheckBasis, CheckCtx, CheckOutput, GeneratorKind, MathOp, TestResult, TupleCall,
skip_extensive_test,
};
use libtest_mimic::{Arguments, Trial};
use rayon::prelude::*;
use spaced::SpacedInput;
// Extensive tests always compare against MPFR, using the `Extensive` case generator.
const BASIS: CheckBasis = CheckBasis::Mpfr;
const GEN_KIND: GeneratorKind = GeneratorKind::Extensive;
/// Run the extensive test suite.
pub fn run() {
    let mut args = Arguments::from_args();
    // Prevent multiple tests from running in parallel, each test gets parallelized internally.
    args.test_threads = Some(1);
    let tests = register_all_tests();

    // With default parallelism, the CPU doesn't saturate. We don't need to be nice to
    // other processes, so do 1.5x to make sure we use all available resources. If the
    // parallelism query fails this passes 0, which rayon documents as "use the default".
    let threads = std::thread::available_parallelism().map(Into::into).unwrap_or(0) * 3 / 2;
    rayon::ThreadPoolBuilder::new().num_threads(threads).build_global().unwrap();

    libtest_mimic::run(&args, tests).exit();
}
/// Callback for `for_each_function!`: registers one extensive test per function by
/// pushing a `Trial` onto the `Vec` identifier passed via `extra`.
macro_rules! mp_extensive_tests {
    (
        fn_name: $fn_name:ident,
        attrs: [$($attr:meta),*],
        extra: [$push_to:ident],
    ) => {
        $(#[$attr])*
        register_single_test::<libm_test::op::$fn_name::Routine>(&mut $push_to);
    };
}
/// Create a list of tests for consumption by `libtest_mimic`.
fn register_all_tests() -> Vec<Trial> {
    let mut all_tests = Vec::new();
    // Expands to one `register_single_test` call per libm function.
    libm_macros::for_each_function! {
        callback: mp_extensive_tests,
        extra: [all_tests],
        skip: [
            // FIXME: test needed, see
            // https://github.com/rust-lang/libm/pull/311#discussion_r1818273392
            nextafter,
            nextafterf,
        ],
    }
    all_tests
}
/// Add a single test to the list.
fn register_single_test<Op>(all: &mut Vec<Trial>)
where
    Op: MathOp + MpOp,
    Op::RustArgs: SpacedInput<Op> + Send,
{
    let test_name = format!("mp_extensive_{}", Op::NAME);
    let ctx = CheckCtx::new(Op::IDENTIFIER, BASIS, GEN_KIND);
    // Whether this test should be marked ignored (e.g. on slow or unsupported configs).
    let skip = skip_extensive_test(&ctx);

    let runner = move || {
        // `optimizations_enabled` is a custom cfg — presumably set by a build script
        // when building with optimizations; confirm where it is emitted.
        if !cfg!(optimizations_enabled) {
            panic!("extensive tests should be run with --release");
        }

        let res = run_single_test::<Op>(&ctx);
        let e = match res {
            Ok(()) => return Ok(()),
            Err(e) => e,
        };

        // Format with the `Debug` implementation so we get the error cause chain, and print it
        // here so we see the result immediately (rather than waiting for all tests to conclude).
        let e = format!("{e:?}");
        eprintln!("failure testing {}:{e}\n", Op::IDENTIFIER);

        Err(e.into())
    };

    all.push(Trial::test(test_name, runner).with_ignored_flag(skip));
}
/// Test runner for a single routine.
///
/// Streams the `Extensive` spaced cases in chunks through a Rayon thread pool, checking
/// each input against MPFR and updating a progress bar as it goes.
fn run_single_test<Op>(ctx: &CheckCtx) -> TestResult
where
    Op: MathOp + MpOp,
    Op::RustArgs: SpacedInput<Op> + Send,
{
    // Small delay before printing anything so other output from the runner has a chance to flush.
    std::thread::sleep(Duration::from_millis(500));
    eprintln!();

    // Count of cases checked so far, shared across Rayon worker threads.
    let completed = AtomicU64::new(0);
    let (ref mut cases, total) = spaced::get_test_cases::<Op>(ctx);
    let pb = Progress::new(Op::NAME, total);

    // Validate one chunk of inputs; `mp_vals` is this worker's reusable MPFR state.
    let test_single_chunk = |mp_vals: &mut Op::MpTy, input_vec: Vec<Op::RustArgs>| -> TestResult {
        for input in input_vec {
            // Test the input.
            let mp_res = Op::run(mp_vals, input);
            let crate_res = input.call_intercept_panics(Op::ROUTINE);
            crate_res.validate(mp_res, input, ctx)?;

            let completed = completed.fetch_add(1, Ordering::Relaxed) + 1;
            pb.update(completed, input);
        }
        Ok(())
    };

    // Chunk the cases so Rayon doesn't switch threads between each iterator item. 50k seems near
    // a performance sweet spot. Ideally we would reuse these allocations rather than discarding,
    // but that is difficult with Rayon's API.
    let chunk_size = 50_000;
    let chunks = std::iter::from_fn(move || {
        let mut v = Vec::with_capacity(chunk_size);
        v.extend(cases.take(chunk_size));
        (!v.is_empty()).then_some(v)
    });

    // Run the actual tests
    let res = chunks.par_bridge().try_for_each_init(Op::new_mp, test_single_chunk);

    let real_total = completed.load(Ordering::Relaxed);
    pb.complete(real_total);

    if res.is_ok() && real_total != total {
        // Provide a warning if our estimate needs to be updated.
        panic!("total run {real_total} does not match expected {total}");
    }

    res
}
/// Wrapper around a `ProgressBar` that handles styles and non-TTY messages.
struct Progress {
    pb: ProgressBar,
    // Routine name padded to a fixed width so columns line up in log output.
    name_padded: String,
    // Style swapped in once the run finishes.
    final_style: ProgressStyle,
    // Whether stderr is a terminal; when false, plain log lines are emitted instead.
    is_tty: bool,
}
impl Progress {
    // Template while the run is in progress; `NAME` is textually replaced with the
    // padded routine name before the template is compiled.
    const PB_TEMPLATE: &str = "[{elapsed:3} {percent:3}%] {bar:20.cyan/blue} NAME \
        {human_pos:>13}/{human_len:13} {per_sec:18} eta {eta:8} {msg}";
    // Template swapped in on completion (shows total time instead of an ETA).
    const PB_TEMPLATE_FINAL: &str = "[{elapsed:3} {percent:3}%] {bar:20.cyan/blue} NAME \
        {human_pos:>13}/{human_len:13} {per_sec:18} done in {elapsed_precise}";

    /// Create a bar expecting `total` cases, announcing the start on stderr.
    fn new(name: &str, total: u64) -> Self {
        eprintln!("starting extensive tests for `{name}`");
        let name_padded = format!("{name:9}");
        let is_tty = io::stderr().is_terminal();
        let initial_style =
            ProgressStyle::with_template(&Self::PB_TEMPLATE.replace("NAME", &name_padded))
                .unwrap()
                .progress_chars("##-");
        let final_style =
            ProgressStyle::with_template(&Self::PB_TEMPLATE_FINAL.replace("NAME", &name_padded))
                .unwrap()
                .progress_chars("##-");
        let pb = ProgressBar::new(total);
        pb.set_style(initial_style);
        Self { pb, final_style, name_padded, is_tty }
    }

    /// Record progress, rate-limited so updating the bar isn't itself a bottleneck.
    fn update(&self, completed: u64, input: impl fmt::Debug) {
        // Infrequently update the progress bar.
        if completed % 20_000 == 0 {
            self.pb.set_position(completed);
        }

        if completed % 500_000 == 0 {
            self.pb.set_message(format!("input: {input:<24?}"));
        }

        // Without a terminal the bar never renders, so emit a plain log line occasionally.
        if !self.is_tty && completed % 5_000_000 == 0 {
            let len = self.pb.length().unwrap_or_default();
            eprintln!(
                "[{elapsed:3?}s {percent:3.0}%] {name} \
                {human_pos:>10}/{human_len:<10} {per_sec:14.2}/s eta {eta:4}s {input:<24?}",
                elapsed = self.pb.elapsed().as_secs(),
                percent = completed as f32 * 100.0 / len as f32,
                name = self.name_padded,
                human_pos = completed,
                human_len = len,
                per_sec = self.pb.per_sec(),
                eta = self.pb.eta().as_secs()
            );
        }
    }

    /// Swap to the final style and leave the completed bar on screen.
    fn complete(self, real_total: u64) {
        self.pb.set_style(self.final_style);
        self.pb.set_position(real_total);
        self.pb.abandon();

        if !self.is_tty {
            let len = self.pb.length().unwrap_or_default();
            // NOTE(review): the `elapsed_precise` named argument here is whole seconds
            // (`as_secs()`), not the sub-second `{elapsed_precise}` template value.
            eprintln!(
                "[{elapsed:3}s {percent:3.0}%] {name} \
                {human_pos:>10}/{human_len:<10} {per_sec:14.2}/s done in {elapsed_precise}",
                elapsed = self.pb.elapsed().as_secs(),
                percent = real_total as f32 * 100.0 / len as f32,
                name = self.name_padded,
                human_pos = real_total,
                human_len = len,
                per_sec = self.pb.per_sec(),
                elapsed_precise = self.pb.elapsed().as_secs(),
            );
        }

        eprintln!();
    }
}

View file

@ -0,0 +1,13 @@
[package]
name = "musl-math-sys"
version = "0.1.0"
edition = "2024"
publish = false
[dependencies]
[dev-dependencies]
libm = { path = "../../libm" }
[build-dependencies]
cc = "1.2.16"

View file

@ -0,0 +1,319 @@
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::{env, fs, str};
/// Static library that will be built
const LIB_NAME: &str = "musl_math_prefixed";

/// Files that have more than one symbol. Map of file names to the symbols defined in that file.
///
/// These need special handling in the build: the symbol-renaming defines must cover every
/// symbol a file provides, not just the one matching the file name.
const MULTIPLE_SYMBOLS: &[(&str, &[&str])] = &[
    ("__invtrigl", &["__invtrigl", "__invtrigl_R", "__pio2_hi", "__pio2_lo"]),
    ("__polevll", &["__polevll", "__p1evll"]),
    ("erf", &["erf", "erfc"]),
    ("erff", &["erff", "erfcf"]),
    ("erfl", &["erfl", "erfcl"]),
    ("exp10", &["exp10", "pow10"]),
    ("exp10f", &["exp10f", "pow10f"]),
    ("exp10l", &["exp10l", "pow10l"]),
    ("exp2f_data", &["exp2f_data", "__exp2f_data"]),
    ("exp_data", &["exp_data", "__exp_data"]),
    ("j0", &["j0", "y0"]),
    ("j0f", &["j0f", "y0f"]),
    ("j1", &["j1", "y1"]),
    ("j1f", &["j1f", "y1f"]),
    ("jn", &["jn", "yn"]),
    ("jnf", &["jnf", "ynf"]),
    ("lgamma", &["lgamma", "__lgamma_r"]),
    ("remainder", &["remainder", "drem"]),
    ("remainderf", &["remainderf", "dremf"]),
    ("lgammaf", &["lgammaf", "lgammaf_r", "__lgammaf_r"]),
    ("lgammal", &["lgammal", "lgammal_r", "__lgammal_r"]),
    ("log2_data", &["log2_data", "__log2_data"]),
    ("log2f_data", &["log2f_data", "__log2f_data"]),
    ("log_data", &["log_data", "__log_data"]),
    ("logf_data", &["logf_data", "__logf_data"]),
    ("pow_data", &["pow_data", "__pow_log_data"]),
    ("powf_data", &["powf_data", "__powf_log2_data"]),
    ("signgam", &["signgam", "__signgam"]),
    ("sqrt_data", &["sqrt_data", "__rsqrt_tab"]),
];
/// Build-script entry point: compile the prefixed musl math archive, unless the target
/// is known not to support building musl.
fn main() {
    let cfg = Config::from_env();

    let unsupported = cfg.target_env == "msvc"
        || cfg.target_family == "wasm"
        || cfg.target_features.iter().any(|f| f == "thumb-mode");

    if unsupported {
        println!(
            "cargo::warning=Musl doesn't compile with the current target {}; skipping build",
            &cfg.target_string
        );
        return;
    }

    build_musl_math(&cfg);
}
// Some fields are only consumed via `Debug` output; keep them for diagnostics.
#[allow(dead_code)]
#[derive(Debug)]
struct Config {
    manifest_dir: PathBuf,
    out_dir: PathBuf,
    // Checkout of the musl sources (a submodule under the manifest directory).
    musl_dir: PathBuf,
    // Architecture name as musl spells it (e.g. `i386` rather than `x86`).
    musl_arch: String,
    target_arch: String,
    target_env: String,
    target_family: String,
    target_os: String,
    target_string: String,
    target_vendor: String,
    target_features: Vec<String>,
}

impl Config {
    /// Collect build configuration from the environment variables Cargo sets for
    /// build scripts, and register rerun-if-changed paths.
    fn from_env() -> Self {
        let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
        let target_features = env::var("CARGO_CFG_TARGET_FEATURE")
            .map(|feats| feats.split(',').map(ToOwned::to_owned).collect())
            .unwrap_or_default();
        let musl_dir = manifest_dir.join("musl");
        let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap();
        // musl calls its 32-bit x86 port `i386`.
        let musl_arch = if target_arch == "x86" { "i386".to_owned() } else { target_arch.clone() };

        // Rebuild when our C patches or the musl sources change.
        println!("cargo::rerun-if-changed={}/c_patches", manifest_dir.display());
        println!("cargo::rerun-if-changed={}", musl_dir.display());

        Self {
            manifest_dir,
            out_dir: PathBuf::from(env::var("OUT_DIR").unwrap()),
            musl_dir,
            musl_arch,
            target_arch,
            target_env: env::var("CARGO_CFG_TARGET_ENV").unwrap(),
            target_family: env::var("CARGO_CFG_TARGET_FAMILY").unwrap(),
            target_os: env::var("CARGO_CFG_TARGET_OS").unwrap(),
            target_string: env::var("TARGET").unwrap(),
            target_vendor: env::var("CARGO_CFG_TARGET_VENDOR").unwrap(),
            target_features,
        }
    }
}
/// Build musl math symbols to a static library
///
/// Every public symbol is renamed with a `musl_` prefix (via `-D` defines) so the
/// resulting archive can be linked next to our own implementations without clashing.
fn build_musl_math(cfg: &Config) {
    let musl_dir = &cfg.musl_dir;
    let math = musl_dir.join("src/math");
    let arch_dir = musl_dir.join("arch").join(&cfg.musl_arch);
    assert!(math.exists(), "musl source not found. Is the submodule up to date?");

    let source_map = find_math_source(&math, cfg);
    let out_path = cfg.out_dir.join(format!("lib{LIB_NAME}.a"));

    // Run configuration steps. Usually done as part of the musl `Makefile`.
    let obj_include = cfg.out_dir.join("musl_obj/include");
    fs::create_dir_all(&obj_include).unwrap();
    fs::create_dir_all(obj_include.join("bits")).unwrap();

    // Generate `bits/alltypes.h` the way musl's build does: run its sed script over
    // the arch-specific and generic type templates.
    let sed_stat = Command::new("sed")
        .arg("-f")
        .arg(musl_dir.join("tools/mkalltypes.sed"))
        .arg(arch_dir.join("bits/alltypes.h.in"))
        .arg(musl_dir.join("include/alltypes.h.in"))
        .stderr(Stdio::inherit())
        .output()
        .unwrap();
    assert!(sed_stat.status.success(), "sed command failed: {:?}", sed_stat.status);
    fs::write(obj_include.join("bits/alltypes.h"), sed_stat.stdout).unwrap();

    let mut cbuild = cc::Build::new();
    cbuild
        .extra_warnings(false)
        .warnings(false)
        .flag_if_supported("-Wno-bitwise-op-parentheses")
        .flag_if_supported("-Wno-literal-range")
        .flag_if_supported("-Wno-parentheses")
        .flag_if_supported("-Wno-shift-count-overflow")
        .flag_if_supported("-Wno-shift-op-parentheses")
        .flag_if_supported("-Wno-unused-but-set-variable")
        .flag_if_supported("-std=c99")
        .flag_if_supported("-ffreestanding")
        .flag_if_supported("-nostdinc")
        .define("_ALL_SOURCE", "1")
        // Consumed by our `c_patches/features.h` override to include the real header.
        .define(
            "ROOT_INCLUDE_FEATURES",
            Some(musl_dir.join("include/features.h").to_str().unwrap()),
        )
        // Our overrides are in this directory
        .include(cfg.manifest_dir.join("c_patches"))
        .include(musl_dir.join("arch").join(&cfg.musl_arch))
        .include(musl_dir.join("arch/generic"))
        .include(musl_dir.join("src/include"))
        .include(musl_dir.join("src/internal"))
        .include(obj_include)
        .include(musl_dir.join("include"))
        .file(cfg.manifest_dir.join("c_patches/alias.c"));

    for (sym_name, src_file) in source_map {
        // Build the source file
        cbuild.file(src_file);

        // Trickery! Redefine the symbol names to have the prefix `musl_`, which allows us to
        // differentiate these symbols from whatever we provide.
        if let Some((_names, syms)) =
            MULTIPLE_SYMBOLS.iter().find(|(name, _syms)| *name == sym_name)
        {
            // Handle the occasional file that defines multiple symbols
            for sym in *syms {
                cbuild.define(sym, Some(format!("musl_{sym}").as_str()));
            }
        } else {
            // If the file doesn't define multiple symbols, the file name will be the symbol
            cbuild.define(&sym_name, Some(format!("musl_{sym_name}").as_str()));
        }
    }

    if cfg!(windows) {
        // On Windows we don't have a good way to check symbols, so skip that step.
        cbuild.compile(LIB_NAME);
        return;
    }

    let objfiles = cbuild.compile_intermediates();

    // We create the archive ourselves with relocations rather than letting `cc` do it so we can
    // encourage it to resolve symbols now. This should help avoid accidentally linking the wrong
    // thing.
    let stat = cbuild
        .get_compiler()
        .to_command()
        .arg("-r")
        .arg("-o")
        .arg(&out_path)
        .args(objfiles)
        .status()
        .unwrap();
    assert!(stat.success());

    println!("cargo::rustc-link-lib={LIB_NAME}");
    println!("cargo::rustc-link-search=native={}", cfg.out_dir.display());

    validate_archive_symbols(&out_path);
}
/// Build a map of `name -> path`. `name` is typically the symbol name, but this doesn't account
/// for files that provide multiple symbols.
///
/// Arch-specific implementations (a subdirectory named after the target arch) replace the
/// generic versions when present.
fn find_math_source(math_root: &Path, cfg: &Config) -> BTreeMap<String, PathBuf> {
    let mut map = BTreeMap::new();
    let mut arch_dir = None;

    // Locate all files and directories
    for item in fs::read_dir(math_root).unwrap() {
        let path = item.unwrap().path();
        let meta = fs::metadata(&path).unwrap();

        if meta.is_dir() {
            // Make note of the arch-specific directory if it exists
            if path.file_name().unwrap() == cfg.target_arch.as_str() {
                arch_dir = Some(path);
            }
            continue;
        }

        // Skip non-source files
        if path.extension().is_some_and(|ext| ext == "h") {
            continue;
        }

        // Build the owned key first so `path` can then be moved into the map
        // (previously this cloned the already-owned `PathBuf` with `to_owned`).
        let sym_name = path.file_stem().unwrap().to_str().unwrap().to_owned();
        map.insert(sym_name, path);
    }

    // If arch-specific versions are available, build those instead.
    if let Some(arch_dir) = arch_dir {
        for item in fs::read_dir(arch_dir).unwrap() {
            let path = item.unwrap().path();

            if path.extension().unwrap() == "s" {
                // FIXME: we never build assembly versions since we have no good way to
                // rename the symbol (our options are probably preprocessor or objcopy).
                continue;
            }

            let sym_name = path.file_stem().unwrap().to_str().unwrap().to_owned();
            map.insert(sym_name, path);
        }
    }

    map
}
/// Make sure we don't have something like a loose unprefixed `_cos` called somewhere, which could
/// wind up linking to system libraries rather than the built musl library.
///
/// Shells out to `nm`; the caller skips this check on Windows.
fn validate_archive_symbols(out_path: &Path) {
    // Prefixes of undefined symbols that are expected and allowed to remain unresolved.
    const ALLOWED_UNDEF_PFX: &[&str] = &[
        // PIC and arch-specific
        ".TOC",
        "_GLOBAL_OFFSET_TABLE_",
        "__x86.get_pc_thunk",
        // gcc/compiler-rt/compiler-builtins symbols
        "__add",
        "__aeabi_",
        "__div",
        "__eq",
        "__extend",
        "__fix",
        "__float",
        "__gcc_",
        "__ge",
        "__gt",
        "__le",
        "__lshr",
        "__lt",
        "__mul",
        "__ne",
        "__stack_chk_fail",
        "__stack_chk_guard",
        "__sub",
        "__trunc",
        "__undef",
        // string routines
        "__bzero",
        "bzero",
        // FPENV interfaces
        "feclearexcept",
        "fegetround",
        "feraiseexcept",
        "fesetround",
        "fetestexcept",
    ];

    // List global undefined symbols
    let out =
        Command::new("nm").arg("-guj").arg(out_path).stderr(Stdio::inherit()).output().unwrap();
    let undef = str::from_utf8(&out.stdout).unwrap();
    let mut undef = undef.lines().collect::<Vec<_>>();
    undef.retain(|sym| {
        // Account for file formats that add a leading `_`. Use checked slicing instead
        // of `sym[1..]` so an empty line (or a multibyte first character) in the `nm`
        // output cannot panic here.
        !ALLOWED_UNDEF_PFX.iter().any(|pfx| {
            sym.starts_with(pfx) || sym.get(1..).is_some_and(|rest| rest.starts_with(pfx))
        })
    });
    assert!(undef.is_empty(), "found disallowed undefined symbols: {undef:#?}");

    // Find any symbols that are missing the `_musl_` prefix`
    let out =
        Command::new("nm").arg("-gUj").arg(out_path).stderr(Stdio::inherit()).output().unwrap();
    let defined = str::from_utf8(&out.stdout).unwrap();
    let mut defined = defined.lines().collect::<Vec<_>>();
    defined.retain(|sym| {
        !(sym.starts_with("_musl_")
            || sym.starts_with("musl_")
            || sym.starts_with("__x86.get_pc_thunk"))
    });
    assert!(defined.is_empty(), "found unprefixed symbols: {defined:#?}");
}

View file

@ -0,0 +1,40 @@
/* On platforms that don't support weak symbols, define required aliases
 * as wrappers. See comments in `features.h` for more.
 */
#if defined(__APPLE__) || defined(__MINGW32__)

/* Prototypes for the implementations the wrappers below forward to.
 *
 * FIX(review): `__lgammal_r` and `exp10l` were previously declared as returning
 * `long`, but the wrappers below return their results as `long double`; the
 * mismatched declarations would truncate results through an integer conversion.
 */
double __lgamma_r(double a, int *b);
float __lgammaf_r(float a, int *b);
long double __lgammal_r(long double a, int *b);
double exp10(double a);
float exp10f(float a);
long double exp10l(long double a);
double remainder(double a, double b);
float remainderf(float a, float b);

/* lgamma*_r forward to the double-underscore implementations. */
double lgamma_r(double a, int *b) {
    return __lgamma_r(a, b);
}
float lgammaf_r(float a, int *b) {
    return __lgammaf_r(a, b);
}
long double lgammal_r(long double a, int *b) {
    return __lgammal_r(a, b);
}

/* pow10* are aliases for exp10*. */
double pow10(double a) {
    return exp10(a);
}
float pow10f(float a) {
    return exp10f(a);
}
long double pow10l(long double a) {
    return exp10l(a);
}

/* drem* are aliases for remainder*. */
double drem(double a, double b) {
    return remainder(a, b);
}
float dremf(float a, float b) {
    return remainderf(a, b);
}

#endif

View file

@ -0,0 +1,39 @@
/* This is meant to override Musl's src/include/features.h
 *
 * We use a separate file here to redefine some attributes that don't work on
 * all platforms that we would like to build on.
 */

#ifndef FEATURES_H
#define FEATURES_H

/* Get the required `#include "../../include/features.h"` since we can't use
 * the relative path. The C macros need double indirection to get a usable
 * string. */
#define _stringify_inner(s) #s
#define _stringify(s) _stringify_inner(s)
#include _stringify(ROOT_INCLUDE_FEATURES)

#if defined(__APPLE__)
#define weak __attribute__((__weak__))
#define hidden __attribute__((__visibility__("hidden")))

/* We _should_ be able to define this as:
 *     _Pragma(_stringify(weak musl_ ## new = musl_ ## old))
 * However, weak symbols aren't handled correctly [1]. So we manually write
 * wrappers, which are in `alias.c`.
 *
 * [1]: https://github.com/llvm/llvm-project/issues/111321
 */
#define weak_alias(old, new) /* nothing */

#else
#define weak __attribute__((__weak__))
#define hidden __attribute__((__visibility__("hidden")))
/* Emit the alias against the `musl_`-prefixed names so it matches the symbol
 * renaming done by the build script. */
#define weak_alias(old, new) \
    extern __typeof(old) musl_ ## new \
    __attribute__((__weak__, __alias__(_stringify(musl_ ## old))))
#endif /* defined(__APPLE__) */

#endif

@ -0,0 +1 @@
Subproject commit 61399d4bd02ae1ec03068445aa7ffe9174466bfd

View file

@ -0,0 +1,287 @@
//! Bindings to Musl math functions (these are built in `build.rs`).
use std::ffi::{c_char, c_int, c_long};
/// Macro for creating bindings and exposing a safe function (since the implementations have no
/// preconditions). Included functions must have correct signatures, otherwise this will be
/// unsound.
macro_rules! functions {
    ( $(
        $( #[$meta:meta] )*
        $pfx_name:ident: $name:ident( $($arg:ident: $aty:ty),+ ) -> $rty:ty;
    )* ) => {
        // Raw FFI declarations for the `musl_`-prefixed symbols from the built archive.
        unsafe extern "C" {
            $( fn $pfx_name( $($arg: $aty),+ ) -> $rty; )*
        }

        $(
            // Expose a safe version
            $( #[$meta] )*
            pub fn $name( $($arg: $aty),+ ) -> $rty {
                // SAFETY: FFI calls with no preconditions
                unsafe { $pfx_name( $($arg),+ ) }
            }
        )*

        // Generate one smoke test per function via the `@single_test` arm below.
        #[cfg(test)]
        mod tests {
            use super::*;
            use test_support::CallTest;

            $( functions!(
                @single_test
                $name($($arg: $aty),+) -> $rty
            ); )*
        }
    };
    (@single_test
        $name:ident( $($arg:ident: $aty:ty),+ ) -> $rty:ty
    ) => {
        // Run a simple check to ensure we can link and call the function without crashing.
        #[test]
        // FIXME(#309): LE PPC crashes calling some musl functions
        #[cfg_attr(all(target_arch = "powerpc64", target_endian = "little"), ignore)]
        fn $name() {
            <fn($($aty),+) -> $rty>::check(super::$name);
        }
    };
}
#[cfg(test)]
mod test_support {
    use core::ffi::c_char;

    /// Just verify that we are able to call the function.
    pub trait CallTest {
        fn check(f: Self);
    }

    // Implement `CallTest` for simple by-value signatures: call with every argument
    // set to `1` (cast to the parameter type) and ignore the result.
    macro_rules! impl_calltest {
        ($( ($($arg:ty),*) -> $ret:ty; )*) => {
            $(
                impl CallTest for fn($($arg),*) -> $ret {
                    fn check(f: Self) {
                        f($(1 as $arg),*);
                    }
                }
            )*
        };
    }

    impl_calltest! {
        (f32) -> f32;
        (f64) -> f64;
        (f32, f32) -> f32;
        (f64, f64) -> f64;
        (i32, f32) -> f32;
        (i32, f64) -> f64;
        (f32, f32, f32) -> f32;
        (f64, f64, f64) -> f64;
        (f32, i32) -> f32;
        (f32, i64) -> f32;
        (f32) -> i32;
        (f64) -> i32;
        (f64, i32) -> f64;
        (f64, i64) -> f64;
    }

    // Signatures with out-parameters (or pointer arguments) can't go through the
    // macro above, so they get hand-written impls that supply scratch storage.
    impl CallTest for fn(f32, &mut f32) -> f32 {
        fn check(f: Self) {
            let mut tmp = 0.0;
            f(0.0, &mut tmp);
        }
    }
    impl CallTest for fn(f64, &mut f64) -> f64 {
        fn check(f: Self) {
            let mut tmp = 0.0;
            f(0.0, &mut tmp);
        }
    }
    impl CallTest for fn(f32, &mut i32) -> f32 {
        fn check(f: Self) {
            let mut tmp = 1;
            f(0.0, &mut tmp);
        }
    }
    impl CallTest for fn(f64, &mut i32) -> f64 {
        fn check(f: Self) {
            let mut tmp = 1;
            f(0.0, &mut tmp);
        }
    }
    impl CallTest for fn(f32, f32, &mut i32) -> f32 {
        fn check(f: Self) {
            let mut tmp = 1;
            f(0.0, 0.0, &mut tmp);
        }
    }
    impl CallTest for fn(f64, f64, &mut i32) -> f64 {
        fn check(f: Self) {
            let mut tmp = 1;
            f(0.0, 0.0, &mut tmp);
        }
    }
    impl CallTest for fn(f32, &mut f32, &mut f32) {
        fn check(f: Self) {
            let mut tmp1 = 1.0;
            let mut tmp2 = 1.0;
            f(0.0, &mut tmp1, &mut tmp2);
        }
    }
    impl CallTest for fn(f64, &mut f64, &mut f64) {
        fn check(f: Self) {
            let mut tmp1 = 1.0;
            let mut tmp2 = 1.0;
            f(0.0, &mut tmp1, &mut tmp2);
        }
    }
    impl CallTest for fn(*const c_char) -> f32 {
        fn check(f: Self) {
            f(c"1".as_ptr());
        }
    }
    impl CallTest for fn(*const c_char) -> f64 {
        fn check(f: Self) {
            f(c"1".as_ptr());
        }
    }
}
// Bindings for every musl math function tested against. The left column is the
// prefixed symbol in the built archive; the right side is the safe wrapper exposed.
functions! {
    musl_acos: acos(a: f64) -> f64;
    musl_acosf: acosf(a: f32) -> f32;
    musl_acosh: acosh(a: f64) -> f64;
    musl_acoshf: acoshf(a: f32) -> f32;
    musl_asin: asin(a: f64) -> f64;
    musl_asinf: asinf(a: f32) -> f32;
    musl_asinh: asinh(a: f64) -> f64;
    musl_asinhf: asinhf(a: f32) -> f32;
    musl_atan2: atan2(a: f64, b: f64) -> f64;
    musl_atan2f: atan2f(a: f32, b: f32) -> f32;
    musl_atan: atan(a: f64) -> f64;
    musl_atanf: atanf(a: f32) -> f32;
    musl_atanh: atanh(a: f64) -> f64;
    musl_atanhf: atanhf(a: f32) -> f32;
    musl_cbrt: cbrt(a: f64) -> f64;
    musl_cbrtf: cbrtf(a: f32) -> f32;
    musl_ceil: ceil(a: f64) -> f64;
    musl_ceilf: ceilf(a: f32) -> f32;
    musl_copysign: copysign(a: f64, b: f64) -> f64;
    musl_copysignf: copysignf(a: f32, b: f32) -> f32;
    musl_cos: cos(a: f64) -> f64;
    musl_cosf: cosf(a: f32) -> f32;
    musl_cosh: cosh(a: f64) -> f64;
    musl_coshf: coshf(a: f32) -> f32;
    musl_drem: drem(a: f64, b: f64) -> f64;
    musl_dremf: dremf(a: f32, b: f32) -> f32;
    musl_erf: erf(a: f64) -> f64;
    musl_erfc: erfc(a: f64) -> f64;
    musl_erfcf: erfcf(a: f32) -> f32;
    musl_erff: erff(a: f32) -> f32;
    musl_exp10: exp10(a: f64) -> f64;
    musl_exp10f: exp10f(a: f32) -> f32;
    musl_exp2: exp2(a: f64) -> f64;
    musl_exp2f: exp2f(a: f32) -> f32;
    musl_exp: exp(a: f64) -> f64;
    musl_expf: expf(a: f32) -> f32;
    musl_expm1: expm1(a: f64) -> f64;
    musl_expm1f: expm1f(a: f32) -> f32;
    musl_fabs: fabs(a: f64) -> f64;
    musl_fabsf: fabsf(a: f32) -> f32;
    musl_fdim: fdim(a: f64, b: f64) -> f64;
    musl_fdimf: fdimf(a: f32, b: f32) -> f32;
    musl_finite: finite(a: f64) -> c_int;
    musl_finitef: finitef(a: f32) -> c_int;
    musl_floor: floor(a: f64) -> f64;
    musl_floorf: floorf(a: f32) -> f32;
    musl_fma: fma(a: f64, b: f64, c: f64) -> f64;
    musl_fmaf: fmaf(a: f32, b: f32, c: f32) -> f32;
    musl_fmax: fmax(a: f64, b: f64) -> f64;
    musl_fmaxf: fmaxf(a: f32, b: f32) -> f32;
    musl_fmin: fmin(a: f64, b: f64) -> f64;
    musl_fminf: fminf(a: f32, b: f32) -> f32;
    musl_fmod: fmod(a: f64, b: f64) -> f64;
    musl_fmodf: fmodf(a: f32, b: f32) -> f32;
    musl_frexp: frexp(a: f64, b: &mut c_int) -> f64;
    musl_frexpf: frexpf(a: f32, b: &mut c_int) -> f32;
    musl_hypot: hypot(a: f64, b: f64) -> f64;
    musl_hypotf: hypotf(a: f32, b: f32) -> f32;
    musl_ilogb: ilogb(a: f64) -> c_int;
    musl_ilogbf: ilogbf(a: f32) -> c_int;
    musl_j0: j0(a: f64) -> f64;
    musl_j0f: j0f(a: f32) -> f32;
    musl_j1: j1(a: f64) -> f64;
    musl_j1f: j1f(a: f32) -> f32;
    musl_jn: jn(a: c_int, b: f64) -> f64;
    musl_jnf: jnf(a: c_int, b: f32) -> f32;
    musl_ldexp: ldexp(a: f64, b: c_int) -> f64;
    musl_ldexpf: ldexpf(a: f32, b: c_int) -> f32;
    musl_lgamma: lgamma(a: f64) -> f64;
    musl_lgamma_r: lgamma_r(a: f64, b: &mut c_int) -> f64;
    musl_lgammaf: lgammaf(a: f32) -> f32;
    musl_lgammaf_r: lgammaf_r(a: f32, b: &mut c_int) -> f32;
    musl_log10: log10(a: f64) -> f64;
    musl_log10f: log10f(a: f32) -> f32;
    musl_log1p: log1p(a: f64) -> f64;
    musl_log1pf: log1pf(a: f32) -> f32;
    musl_log2: log2(a: f64) -> f64;
    musl_log2f: log2f(a: f32) -> f32;
    musl_log: log(a: f64) -> f64;
    musl_logb: logb(a: f64) -> f64;
    musl_logbf: logbf(a: f32) -> f32;
    musl_logf: logf(a: f32) -> f32;
    musl_modf: modf(a: f64, b: &mut f64) -> f64;
    musl_modff: modff(a: f32, b: &mut f32) -> f32;
    // FIXME: these need to be unsafe
    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    musl_nan: nan(a: *const c_char) -> f64;
    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    musl_nanf: nanf(a: *const c_char) -> f32;
    musl_nearbyint: nearbyint(a: f64) -> f64;
    musl_nearbyintf: nearbyintf(a: f32) -> f32;
    musl_nextafter: nextafter(a: f64, b: f64) -> f64;
    musl_nextafterf: nextafterf(a: f32, b: f32) -> f32;
    musl_pow10: pow10(a: f64) -> f64;
    musl_pow10f: pow10f(a: f32) -> f32;
    musl_pow: pow(a: f64, b: f64) -> f64;
    musl_powf: powf(a: f32, b: f32) -> f32;
    musl_remainder: remainder(a: f64, b: f64) -> f64;
    musl_remainderf: remainderf(a: f32, b: f32) -> f32;
    musl_remquo: remquo(a: f64, b: f64, c: &mut c_int) -> f64;
    musl_remquof: remquof(a: f32, b: f32, c: &mut c_int) -> f32;
    musl_rint: rint(a: f64) -> f64;
    musl_rintf: rintf(a: f32) -> f32;
    musl_round: round(a: f64) -> f64;
    musl_roundf: roundf(a: f32) -> f32;
    musl_scalbln: scalbln(a: f64, b: c_long) -> f64;
    musl_scalblnf: scalblnf(a: f32, b: c_long) -> f32;
    musl_scalbn: scalbn(a: f64, b: c_int) -> f64;
    musl_scalbnf: scalbnf(a: f32, b: c_int) -> f32;
    musl_significand: significand(a: f64) -> f64;
    musl_significandf: significandf(a: f32) -> f32;
    musl_sin: sin(a: f64) -> f64;
    musl_sincos: sincos(a: f64, b: &mut f64, c: &mut f64) -> ();
    musl_sincosf: sincosf(a: f32, b: &mut f32, c: &mut f32) -> ();
    musl_sinf: sinf(a: f32) -> f32;
    musl_sinh: sinh(a: f64) -> f64;
    musl_sinhf: sinhf(a: f32) -> f32;
    musl_sqrt: sqrt(a: f64) -> f64;
    musl_sqrtf: sqrtf(a: f32) -> f32;
    musl_tan: tan(a: f64) -> f64;
    musl_tanf: tanf(a: f32) -> f32;
    musl_tanh: tanh(a: f64) -> f64;
    musl_tanhf: tanhf(a: f32) -> f32;
    musl_tgamma: tgamma(a: f64) -> f64;
    musl_tgammaf: tgammaf(a: f32) -> f32;
    musl_trunc: trunc(a: f64) -> f64;
    musl_truncf: truncf(a: f32) -> f32;
    musl_y0: y0(a: f64) -> f64;
    musl_y0f: y0f(a: f32) -> f32;
    musl_y1: y1(a: f64) -> f64;
    musl_y1f: y1f(a: f32) -> f32;
    musl_yn: yn(a: c_int, b: f64) -> f64;
    musl_ynf: ynf(a: c_int, b: f32) -> f32;
}

View file

@ -0,0 +1,18 @@
[package]
name = "util"
version = "0.1.0"
edition = "2024"
# Internal helper tool; never published to crates.io.
publish = false

[features]
default = ["build-musl", "build-mpfr", "unstable-float"]
# Build musl's math library for use as a comparison basis.
build-musl = ["libm-test/build-musl", "dep:musl-math-sys"]
# Build MPFR (via `rug`) for use as a comparison basis.
build-mpfr = ["libm-test/build-mpfr", "dep:rug"]
# Enable the nightly-only `f16` and `f128` types.
unstable-float = ["libm/unstable-float", "libm-test/unstable-float", "rug?/nightly-float"]

[dependencies]
libm = { path = "../../libm", default-features = false }
libm-macros = { path = "../libm-macros" }
libm-test = { path = "../libm-test", default-features = false }
musl-math-sys = { path = "../musl-math-sys", optional = true }
rug = { version = "1.27.0", optional = true, default-features = false, features = ["float", "std"] }

View file

@ -0,0 +1,10 @@
// `configure.rs` is shared with the `libm` crate, so some cfgs it mentions may
// not be emitted for this crate.
#![allow(unexpected_cfgs)]

// Reuse the configuration logic from the main `libm` crate instead of
// duplicating it here.
#[path = "../../libm/configure.rs"]
mod configure;

/// Build script: emit the same `--cfg` flags as `libm` (e.g. `f16_enabled`)
/// so this crate's sources can use them.
fn main() {
    // Rebuild whenever the shared configure module changes.
    println!("cargo:rerun-if-changed=../../libm/configure.rs");
    let cfg = configure::Config::from_env();
    configure::emit_libm_config(&cfg);
}

View file

@ -0,0 +1,382 @@
//! Helper CLI utility for common tasks.
#![cfg_attr(f16_enabled, feature(f16))]
#![cfg_attr(f128_enabled, feature(f128))]
use std::any::type_name;
use std::env;
use std::num::ParseIntError;
use std::str::FromStr;
use libm::support::{Hexf, hf32, hf64};
#[cfg(feature = "build-mpfr")]
use libm_test::mpfloat::MpOp;
use libm_test::{MathOp, TupleCall};
#[cfg(feature = "build-mpfr")]
use rug::az::{self, Az};
/// Top-level CLI help text, printed on unrecognized input.
///
/// (Fixed the user-facing typo "Evaulate" -> "Evaluate".)
const USAGE: &str = "\
usage:

cargo run -p util -- <SUBCOMMAND>

SUBCOMMAND:
    eval <BASIS> <OP> inputs...
        Evaluate the expression with a given basis. This can be useful for
        running routines with a debugger, or quickly checking input. Examples:
        * eval musl sinf 1.234 # print the results of musl sinf(1.234f32)
        * eval mpfr pow 1.234 2.432 # print the results of mpfr pow(1.234, 2.432)
";
/// Entry point: dispatch to a subcommand based on the CLI arguments.
fn main() {
    let owned_args: Vec<String> = env::args().collect();
    let argv: Vec<&str> = owned_args.iter().map(String::as_str).collect();

    match &argv[1..] {
        ["eval", basis, op, inputs @ ..] => do_eval(basis, op, inputs),
        _ => {
            println!("{USAGE}\nunrecognized input `{argv:?}`");
            std::process::exit(1);
        }
    }
}
/// Expanded once per `libm` function by `for_each_function!`. If `$op` (the
/// user-supplied operation name) matches the current function, parse the CLI
/// inputs, evaluate with the requested basis, print the result, and return.
macro_rules! handle_call {
    (
        fn_name: $fn_name:ident,
        CFn: $CFn:ty,
        RustFn: $RustFn:ty,
        RustArgs: $RustArgs:ty,
        attrs: [$($attr:meta),*],
        extra: ($basis:ident, $op:ident, $inputs:ident),
        fn_extra: $musl_fn:expr,
    ) => {
        $(#[$attr])*
        if $op == stringify!($fn_name) {
            type Op = libm_test::op::$fn_name::Routine;

            // Parse the space-separated CLI words into this routine's argument tuple.
            let input = <$RustArgs>::parse($inputs);
            let libm_fn: <Op as MathOp>::RustFn = libm::$fn_name;

            let output = match $basis {
                "libm" => input.call_intercept_panics(libm_fn),
                #[cfg(feature = "build-musl")]
                "musl" => {
                    // `fn_extra` is `None` for routines musl does not provide.
                    let musl_fn: <Op as MathOp>::CFn =
                        $musl_fn.unwrap_or_else(|| panic!("no musl function for {}", $op));
                    input.call(musl_fn)
                }
                #[cfg(feature = "build-mpfr")]
                "mpfr" => {
                    let mut mp = <Op as MpOp>::new_mp();
                    Op::run(&mut mp, input)
                }
                _ => panic!("unrecognized or disabled basis '{}'", $basis),
            };

            // Print both the default debug form and the hex-float form.
            println!("{output:?} {:x}", Hexf(output));
            return;
        }
    };
}
/// Evaluate the specified operation with a given basis.
fn do_eval(basis: &str, op: &str, inputs: &[&str]) {
    // `handle_call` is expanded once per function; the matching expansion
    // prints and returns early, so reaching the end means `op` was unknown.
    libm_macros::for_each_function! {
        callback: handle_call,
        emit_types: [CFn, RustFn, RustArgs],
        extra: (basis, op, inputs),
        // `None` marks routines that musl does not provide (largely `f16`,
        // `f128`, and the IEEE 754-2019 `fmaximum`/`fminimum` families).
        fn_extra: match MACRO_FN_NAME {
            ceilf128
            | ceilf16
            | copysignf128
            | copysignf16
            | fabsf128
            | fabsf16
            | fdimf128
            | fdimf16
            | floorf128
            | floorf16
            | fmaf128
            | fmaxf128
            | fmaxf16
            | fmaximum
            | fmaximum_num
            | fmaximum_numf
            | fmaximum_numf128
            | fmaximum_numf16
            | fmaximumf
            | fmaximumf128
            | fmaximumf16
            | fminf128
            | fminf16
            | fminimum
            | fminimum_num
            | fminimum_numf
            | fminimum_numf128
            | fminimum_numf16
            | fminimumf
            | fminimumf128
            | fminimumf16
            | fmodf128
            | fmodf16
            | ldexpf128
            | ldexpf16
            | rintf128
            | rintf16
            | roundeven
            | roundevenf
            | roundevenf128
            | roundevenf16
            | roundf128
            | roundf16
            | scalbnf128
            | scalbnf16
            | sqrtf128
            | sqrtf16
            | truncf128
            | truncf16 => None,
            _ => Some(musl_math_sys::MACRO_FN_NAME)
        }
    }

    panic!("no operation matching {op}");
}
/// Parse a tuple from a space-delimited string.
trait ParseTuple {
    /// Convert raw CLI words into a concrete argument tuple, panicking on
    /// arity or format errors.
    fn parse(input: &[&str]) -> Self;
}
/// Implement `ParseTuple` for the argument tuples used by `$ty` routines
/// (`f32`/`f64`), parsing every element with the plain `parse` helper.
macro_rules! impl_parse_tuple {
    ($ty:ty) => {
        impl ParseTuple for ($ty,) {
            fn parse(input: &[&str]) -> Self {
                assert_eq!(input.len(), 1, "expected a single argument, got {input:?}");
                (parse(input, 0),)
            }
        }

        impl ParseTuple for ($ty, $ty) {
            fn parse(input: &[&str]) -> Self {
                assert_eq!(input.len(), 2, "expected two arguments, got {input:?}");
                (parse(input, 0), parse(input, 1))
            }
        }

        // Mixed float/int tuples cover routines such as `scalbn`/`ldexp`.
        impl ParseTuple for ($ty, i32) {
            fn parse(input: &[&str]) -> Self {
                assert_eq!(input.len(), 2, "expected two arguments, got {input:?}");
                (parse(input, 0), parse(input, 1))
            }
        }

        // Int-first tuples cover routines such as `jn`/`yn`.
        impl ParseTuple for (i32, $ty) {
            fn parse(input: &[&str]) -> Self {
                assert_eq!(input.len(), 2, "expected two arguments, got {input:?}");
                (parse(input, 0), parse(input, 1))
            }
        }

        impl ParseTuple for ($ty, $ty, $ty) {
            fn parse(input: &[&str]) -> Self {
                assert_eq!(input.len(), 3, "expected three arguments, got {input:?}");
                (parse(input, 0), parse(input, 1), parse(input, 2))
            }
        }
    };
}
// `f16`/`f128` have no `FromStr` impl yet, so decimal parsing must go through
// `rug` (MPFR); see `parse_rug`.
#[allow(unused_macros)]
#[cfg(feature = "build-mpfr")]
macro_rules! impl_parse_tuple_via_rug {
    ($ty:ty) => {
        impl ParseTuple for ($ty,) {
            fn parse(input: &[&str]) -> Self {
                assert_eq!(input.len(), 1, "expected a single argument, got {input:?}");
                (parse_rug(input, 0),)
            }
        }

        impl ParseTuple for ($ty, $ty) {
            fn parse(input: &[&str]) -> Self {
                assert_eq!(input.len(), 2, "expected two arguments, got {input:?}");
                (parse_rug(input, 0), parse_rug(input, 1))
            }
        }

        // Integer elements still use the plain `parse` helper.
        impl ParseTuple for ($ty, i32) {
            fn parse(input: &[&str]) -> Self {
                assert_eq!(input.len(), 2, "expected two arguments, got {input:?}");
                (parse_rug(input, 0), parse(input, 1))
            }
        }

        impl ParseTuple for (i32, $ty) {
            fn parse(input: &[&str]) -> Self {
                assert_eq!(input.len(), 2, "expected two arguments, got {input:?}");
                (parse(input, 0), parse_rug(input, 1))
            }
        }

        impl ParseTuple for ($ty, $ty, $ty) {
            fn parse(input: &[&str]) -> Self {
                assert_eq!(input.len(), 3, "expected three arguments, got {input:?}");
                (parse_rug(input, 0), parse_rug(input, 1), parse_rug(input, 2))
            }
        }
    };
}
// Fallback for when Rug is not built. The impls still exist so the callers
// compile, but any attempt to parse these types panics with a helpful message.
#[allow(unused_macros)]
#[cfg(not(feature = "build-mpfr"))]
macro_rules! impl_parse_tuple_via_rug {
    ($ty:ty) => {
        impl ParseTuple for ($ty,) {
            fn parse(_input: &[&str]) -> Self {
                panic!("parsing this type requires the `build-mpfr` feature")
            }
        }

        impl ParseTuple for ($ty, $ty) {
            fn parse(_input: &[&str]) -> Self {
                panic!("parsing this type requires the `build-mpfr` feature")
            }
        }

        impl ParseTuple for ($ty, i32) {
            fn parse(_input: &[&str]) -> Self {
                panic!("parsing this type requires the `build-mpfr` feature")
            }
        }

        impl ParseTuple for (i32, $ty) {
            fn parse(_input: &[&str]) -> Self {
                panic!("parsing this type requires the `build-mpfr` feature")
            }
        }

        impl ParseTuple for ($ty, $ty, $ty) {
            fn parse(_input: &[&str]) -> Self {
                panic!("parsing this type requires the `build-mpfr` feature")
            }
        }
    };
}
// `f32`/`f64` parse natively via `FromStr`.
impl_parse_tuple!(f32);
impl_parse_tuple!(f64);

// `f16`/`f128` do not implement `FromStr` yet, so parsing goes through `rug`
// (or panics when the `build-mpfr` feature is disabled).
#[cfg(f16_enabled)]
impl_parse_tuple_via_rug!(f16);
#[cfg(f128_enabled)]
impl_parse_tuple_via_rug!(f128);
/// Try to parse the number, printing a nice message on failure.
///
/// `0x`/`-0x` inputs parse as hex, `0b` as binary; anything else is handed to
/// the type's `FromStr` implementation.
fn parse<T: FromStr + FromStrRadix>(input: &[&str], idx: usize) -> T {
    let s = input[idx];
    let fail_msg = || format!("invalid {} input '{s}'", type_name::<T>());

    // Dispatch on an explicit radix prefix; everything else is decimal.
    let radix = if s.starts_with("0x") || s.starts_with("-0x") {
        Some(16)
    } else if s.starts_with("0b") {
        Some(2)
    } else {
        None
    };

    match radix {
        Some(r) => T::from_str_radix(s, r).unwrap_or_else(|_| panic!("{}", fail_msg())),
        None => s.parse().unwrap_or_else(|_| panic!("{}", fail_msg())),
    }
}
/// Try to parse the float type going via `rug`, for `f16` and `f128` which don't yet implement
/// `FromStr`.
#[cfg(feature = "build-mpfr")]
fn parse_rug<F>(input: &[&str], idx: usize) -> F
where
    F: libm_test::Float + FromStrRadix,
    rug::Float: az::Cast<F>,
{
    let s = input[idx];
    let msg = || format!("invalid {} input '{s}'", type_name::<F>());

    // Explicit radix prefixes are handled without rug (raw bits or hex floats).
    if s.starts_with("0x") {
        return F::from_str_radix(s, 16).unwrap_or_else(|_| panic!("{}", msg()));
    }

    if s.starts_with("0b") {
        return F::from_str_radix(s, 2).unwrap_or_else(|_| panic!("{}", msg()));
    }

    // Decimal: parse with MPFR at `F`'s bit width, then convert.
    let x = rug::Float::parse(s).unwrap_or_else(|_| panic!("{}", msg()));
    let x = rug::Float::with_val(F::BITS, x);
    x.az()
}
/// Radix-aware parsing for CLI inputs with an explicit `0x`/`0b` prefix.
trait FromStrRadix: Sized {
    /// Parse `s` (which may still carry its radix prefix) in the given radix.
    fn from_str_radix(s: &str, radix: u32) -> Result<Self, ParseIntError>;
}
impl FromStrRadix for i32 {
    fn from_str_radix(s: &str, radix: u32) -> Result<Self, ParseIntError> {
        // `i32::from_str_radix` rejects `0x`/`0b` prefixes, so drop them first.
        let digits = strip_radix_prefix(s, radix);
        i32::from_str_radix(digits, radix)
    }
}
#[cfg(f16_enabled)]
impl FromStrRadix for f16 {
    fn from_str_radix(s: &str, radix: u32) -> Result<Self, ParseIntError> {
        // Hex strings containing `p` are C-style hex floats (e.g. `0x1.8p1`).
        if radix == 16 && s.contains("p") {
            return Ok(libm::support::hf16(s));
        }
        // Otherwise interpret the digits as the raw `f16` bit pattern.
        let s = strip_radix_prefix(s, radix);
        u16::from_str_radix(s, radix).map(Self::from_bits)
    }
}
impl FromStrRadix for f32 {
    fn from_str_radix(s: &str, radix: u32) -> Result<Self, ParseIntError> {
        if radix == 16 && s.contains("p") {
            // Parse as a C-style hex float (e.g. `0x1.8p1`).
            return Ok(hf32(s));
        }
        // Otherwise interpret the digits as the raw `f32` bit pattern.
        let s = strip_radix_prefix(s, radix);
        u32::from_str_radix(s, radix).map(Self::from_bits)
    }
}
impl FromStrRadix for f64 {
    fn from_str_radix(s: &str, radix: u32) -> Result<Self, ParseIntError> {
        // Consistency fix: the `radix == 16` guard was missing here, unlike
        // the `f16`/`f32`/`f128` impls. Only hex inputs can be C-style hex
        // floats (e.g. `0x1.8p1`); with radix 2 the digits are all `0`/`1`,
        // so the guard is a no-op for reachable inputs.
        if radix == 16 && s.contains("p") {
            return Ok(hf64(s));
        }
        // Otherwise interpret the digits as the raw `f64` bit pattern.
        let s = strip_radix_prefix(s, radix);
        u64::from_str_radix(s, radix).map(Self::from_bits)
    }
}
#[cfg(f128_enabled)]
impl FromStrRadix for f128 {
    fn from_str_radix(s: &str, radix: u32) -> Result<Self, ParseIntError> {
        // Hex strings containing `p` are C-style hex floats (e.g. `0x1.8p1`).
        if radix == 16 && s.contains("p") {
            return Ok(libm::support::hf128(s));
        }
        // Otherwise interpret the digits as the raw `f128` bit pattern.
        let s = strip_radix_prefix(s, radix);
        u128::from_str_radix(s, radix).map(Self::from_bits)
    }
}
/// Remove a leading `0x`/`0b` radix prefix, if present.
///
/// Returns the input unchanged when the expected prefix is absent — e.g. for a
/// negative literal such as `-0x1`, which `parse` accepts as a hex input. The
/// previous `.unwrap()` panicked on such strings; returning the input instead
/// lets `from_str_radix` fail and the caller print its nice error message.
fn strip_radix_prefix(s: &str, radix: u32) -> &str {
    if radix == 16 {
        s.strip_prefix("0x").unwrap_or(s)
    } else if radix == 2 {
        s.strip_prefix("0b").unwrap_or(s)
    } else {
        s
    }
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,164 @@
# autogenerated by update-api-list.py
acos
acosf
acosh
acoshf
asin
asinf
asinh
asinhf
atan
atan2
atan2f
atanf
atanh
atanhf
cbrt
cbrtf
ceil
ceilf
ceilf128
ceilf16
copysign
copysignf
copysignf128
copysignf16
cos
cosf
cosh
coshf
erf
erfc
erfcf
erff
exp
exp10
exp10f
exp2
exp2f
expf
expm1
expm1f
fabs
fabsf
fabsf128
fabsf16
fdim
fdimf
fdimf128
fdimf16
floor
floorf
floorf128
floorf16
fma
fmaf
fmaf128
fmax
fmaxf
fmaxf128
fmaxf16
fmaximum
fmaximum_num
fmaximum_numf
fmaximum_numf128
fmaximum_numf16
fmaximumf
fmaximumf128
fmaximumf16
fmin
fminf
fminf128
fminf16
fminimum
fminimum_num
fminimum_numf
fminimum_numf128
fminimum_numf16
fminimumf
fminimumf128
fminimumf16
fmod
fmodf
fmodf128
fmodf16
frexp
frexpf
hypot
hypotf
ilogb
ilogbf
j0
j0f
j1
j1f
jn
jnf
ldexp
ldexpf
ldexpf128
ldexpf16
lgamma
lgamma_r
lgammaf
lgammaf_r
log
log10
log10f
log1p
log1pf
log2
log2f
logf
modf
modff
nextafter
nextafterf
pow
powf
remainder
remainderf
remquo
remquof
rint
rintf
rintf128
rintf16
round
roundeven
roundevenf
roundevenf128
roundevenf16
roundf
roundf128
roundf16
scalbn
scalbnf
scalbnf128
scalbnf16
sin
sincos
sincosf
sinf
sinh
sinhf
sqrt
sqrtf
sqrtf128
sqrtf16
tan
tanf
tanh
tanhf
tgamma
tgammaf
trunc
truncf
truncf128
truncf16
y0
y0f
y1
y1f
yn
ynf

View file

@ -0,0 +1,359 @@
#!/usr/bin/env python3
"""Create a text file listing all public API. This can be used to ensure that all
functions are covered by our macros.
This file additionally does tidy-esque checks that all functions are listed where
needed, or that lists are sorted.
"""
import difflib
import json
import re
import subprocess as sp
import sys
from dataclasses import dataclass
from glob import glob
from pathlib import Path
from typing import Any, Callable, TypeAlias
# Directory layout: this script lives in `<repo>/etc/`.
SELF_PATH = Path(__file__)
ETC_DIR = SELF_PATH.parent
ROOT_DIR = ETC_DIR.parent

# These files do not trigger a retest.
IGNORED_SOURCES = ["libm/src/libm_helper.rs", "libm/src/math/support/float_traits.rs"]

IndexTy: TypeAlias = dict[str, dict[str, Any]]
"""Type of the `index` item in rustdoc's JSON output"""
def eprint(*args, **kwargs):
    """Print to stderr, forwarding all other arguments to `print`."""
    print(*args, file=sys.stderr, **kwargs)
# NOTE(review): `__init__` is hand-written below, so `@dataclass` only
# contributes `repr`/`eq` here — confirm that is intentional.
@dataclass
class Crate:
    """Representation of public interfaces and function definition locations in
    `libm`.
    """

    public_functions: list[str]
    """List of all public functions."""
    defs: dict[str, list[str]]
    """Map from `name->[source files]` to find all places that define a public
    function. We track this to know which tests need to be rerun when specific files
    get updated.
    """
    types: dict[str, str]
    """Map from `name->type`."""

    def __init__(self) -> None:
        self.public_functions = []
        self.defs = {}
        self.types = {}

        # Populate everything from one rustdoc JSON invocation.
        j = self.get_rustdoc_json()
        index: IndexTy = j["index"]
        self._init_function_list(index)
        self._init_defs(index)
        self._init_types()

    @staticmethod
    def get_rustdoc_json() -> dict[Any, Any]:
        """Get rustdoc's JSON output for the `libm` crate."""
        # NOTE(review): rustdoc JSON output is unstable (`-Zunstable-options`);
        # this also pins `--edition=2021` — confirm it should track the crate
        # edition.
        j = sp.check_output(
            [
                "rustdoc",
                "libm/src/lib.rs",
                "--edition=2021",
                "--document-private-items",
                "--output-format=json",
                "--cfg=f16_enabled",
                "--cfg=f128_enabled",
                "-Zunstable-options",
                "-o-",
            ],
            cwd=ROOT_DIR,
            text=True,
        )
        j = json.loads(j)
        return j

    def _init_function_list(self, index: IndexTy) -> None:
        """Get a list of public functions from rustdoc JSON output.

        Note that this only finds functions that are reexported in `lib.rs`, this will
        need to be adjusted if we need to account for functions that are defined there, or
        glob reexports in other locations.
        """
        # Filter out items that are not public
        public = [i for i in index.values() if i["visibility"] == "public"]

        # Collect a list of source IDs for reexported items in `lib.rs` or `mod math`.
        use = (i for i in public if "use" in i["inner"])
        use = (
            i
            for i in use
            if i["span"]["filename"] in ["libm/src/math/mod.rs", "libm/src/lib.rs"]
        )
        reexported_ids = [item["inner"]["use"]["id"] for item in use]

        # Collect a list of reexported items that are functions
        for id in reexported_ids:
            srcitem = index.get(str(id))
            # External crate
            if srcitem is None:
                continue

            # Skip if not a function
            if "function" not in srcitem["inner"]:
                continue

            self.public_functions.append(srcitem["name"])
        self.public_functions.sort()

    def _init_defs(self, index: IndexTy) -> None:
        # Map every public function to the set of files defining it.
        defs = {name: set() for name in self.public_functions}
        funcs = (i for i in index.values() if "function" in i["inner"])
        funcs = (f for f in funcs if f["name"] in self.public_functions)

        for func in funcs:
            defs[func["name"]].add(func["span"]["filename"])

        # A lot of the `arch` module is often configured out so doesn't show up in docs. Use
        # string matching as a fallback.
        for fname in glob("libm/src/math/arch/**.rs", root_dir=ROOT_DIR):
            contents = (ROOT_DIR.joinpath(fname)).read_text()
            for name in self.public_functions:
                if f"fn {name}" in contents:
                    defs[name].add(fname)

        # Attribute the base routine's `generic` implementation files to each
        # width-specific variant, then drop ignored sources.
        for name, sources in defs.items():
            base_sources = defs[base_name(name)[0]]
            for src in (s for s in base_sources if "generic" in s):
                sources.add(src)

            for src in IGNORED_SOURCES:
                sources.discard(src)

        # Sort the set
        self.defs = {k: sorted(v) for (k, v) in defs.items()}

    def _init_types(self) -> None:
        # e.g. "sinf" -> "f32"; see `base_name`.
        self.types = {name: base_name(name)[1] for name in self.public_functions}

    def write_function_list(self, check: bool) -> None:
        """Collect the list of public functions to a simple text file."""
        output = "# autogenerated by update-api-list.py\n"
        for name in self.public_functions:
            output += f"{name}\n"

        out_file = ETC_DIR.joinpath("function-list.txt")

        if check:
            # `--check` mode: diff against the checked-in file instead of writing.
            with open(out_file, "r") as f:
                current = f.read()
            diff_and_exit(current, output, "function list")
        else:
            with open(out_file, "w") as f:
                f.write(output)

    def write_function_defs(self, check: bool) -> None:
        """Collect the list of information about public functions to a JSON file ."""
        comment = (
            "Autogenerated by update-api-list.py. "
            "List of files that define a function with a given name. "
            "This file is checked in to make it obvious if refactoring breaks things"
        )

        d = {"__comment": comment}
        d |= {
            name: {"sources": self.defs[name], "type": self.types[name]}
            for name in self.public_functions
        }

        out_file = ETC_DIR.joinpath("function-definitions.json")
        output = json.dumps(d, indent=4) + "\n"

        if check:
            # `--check` mode: diff against the checked-in file instead of writing.
            with open(out_file, "r") as f:
                current = f.read()
            diff_and_exit(current, output, "source list")
        else:
            with open(out_file, "w") as f:
                f.write(output)

    def tidy_lists(self) -> None:
        """In each file, check annotations indicating blocks of code should be sorted or should
        include all public API.
        """
        # Only tracked files participate in the tidy checks.
        flist = sp.check_output(["git", "ls-files"], cwd=ROOT_DIR, text=True)

        for path in flist.splitlines():
            fpath = ROOT_DIR.joinpath(path)
            if fpath.is_dir() or fpath == SELF_PATH:
                continue

            lines = fpath.read_text().splitlines()

            validate_delimited_block(
                fpath,
                lines,
                "verify-sorted-start",
                "verify-sorted-end",
                ensure_sorted,
            )

            validate_delimited_block(
                fpath,
                lines,
                "verify-apilist-start",
                "verify-apilist-end",
                lambda p, n, lines: self.ensure_contains_api(p, n, lines),
            )

    def ensure_contains_api(self, fpath: Path, line_num: int, lines: list[str]):
        """Given a list of strings, ensure that each public function we have is named
        somewhere.
        """
        not_found = []
        for func in self.public_functions:
            # The function name may be on its own or somewhere in a snake case string.
            pat = re.compile(rf"(\b|_){func}(\b|_)")
            found = next((line for line in lines if pat.search(line)), None)
            if found is None:
                not_found.append(func)

        if len(not_found) == 0:
            return

        relpath = fpath.relative_to(ROOT_DIR)
        eprint(f"functions not found at {relpath}:{line_num}: {not_found}")
        exit(1)
def validate_delimited_block(
    fpath: Path,
    lines: list[str],
    start: str,
    end: str,
    validate: Callable[[Path, int, list[str]], None],
) -> None:
    """Identify blocks of code wrapped within `start` and `end`, collect their contents
    to a list of strings, and call `validate` for each of those lists.

    Exits with an error on unbalanced markers.
    """
    relpath = fpath.relative_to(ROOT_DIR)
    collected: list[str] = []
    opened_at: None | int = None

    for lineno, text in enumerate(lines, start=1):
        if start in text:
            # A nested/repeated start marker restarts the block at this line.
            opened_at = lineno
        elif end in text:
            if opened_at is None:
                eprint(f"`{end}` without `{start}` at {relpath}:{lineno}")
                exit(1)
            validate(fpath, opened_at, collected)
            collected = []
            opened_at = None
        elif opened_at is not None:
            collected.append(text)

    if opened_at is not None:
        eprint(f"`{start}` without `{end}` at {relpath}:{opened_at}")
        exit(1)
def ensure_sorted(fpath: Path, block_start_line: int, lines: list[str]) -> None:
    """Ensure that a list of lines is sorted, otherwise print a diff and exit."""
    where = fpath.relative_to(ROOT_DIR)
    actual = "\n".join(lines)
    expected = "\n".join(sorted(lines))
    diff_and_exit(actual, expected, f"sorted block at {where}:{block_start_line}")
def diff_and_exit(actual: str, expected: str, name: str):
    """If the two strings are different, print a diff between them and then exit
    with an error. On a match, print a success message and return.
    """
    if actual == expected:
        print(f"{name} output matches expected; success")
        return

    # `unified_diff` expects newline-terminated lines.
    actual_lines = [f"{line}\n" for line in actual.splitlines()]
    expected_lines = [f"{line}\n" for line in expected.splitlines()]
    delta = difflib.unified_diff(actual_lines, expected_lines, "actual", "expected")
    sys.stdout.writelines(delta)
    print(f"mismatched {name}")
    exit(1)
def base_name(name: str) -> tuple[str, str]:
    """Return the basename and type from a full function name. Keep in sync with Rust's
    `fn base_name`.
    """
    # Names whose suffix cannot be derived mechanically (e.g. `erff` would
    # otherwise lose both trailing `f`s).
    known_mappings = [
        ("erff", ("erf", "f32")),
        ("erf", ("erf", "f64")),
        ("modff", ("modf", "f32")),
        ("modf", ("modf", "f64")),
        ("lgammaf_r", ("lgamma_r", "f32")),
        ("lgamma_r", ("lgamma_r", "f64")),
    ]

    found = next((base for (full, base) in known_mappings if full == name), None)
    if found is not None:
        return found

    # Use `removesuffix` rather than `rstrip`: `rstrip` strips a *set* of
    # characters, so e.g. `rstrip("f")` removes every trailing `f` and
    # `rstrip("f16")` removes any trailing run of `f`, `1`, and `6`.
    if name.endswith("f"):
        return (name.removesuffix("f"), "f32")
    if name.endswith("f16"):
        return (name.removesuffix("f16"), "f16")
    if name.endswith("f128"):
        return (name.removesuffix("f128"), "f128")
    return (name, "f64")
def ensure_updated_list(check: bool) -> None:
    """Runner to update the function list and JSON, or check that it is already up
    to date.
    """
    crate_info = Crate()
    crate_info.write_function_list(check)
    crate_info.write_function_defs(check)
    crate_info.tidy_lists()
def main():
    """By default overwrite the file. If `--check` is passed, print a diff instead and
    error if the files are different.
    """
    argv = sys.argv
    if len(argv) == 1:
        ensure_updated_list(False)
    elif len(argv) == 2 and argv[1] == "--check":
        ensure_updated_list(True)
    else:
        print("unrecognized arguments")
        exit(1)


if __name__ == "__main__":
    main()

View file

@ -0,0 +1,49 @@
[package]
authors = ["Jorge Aparicio <jorge@japaric.io>"]
categories = ["no-std"]
description = "libm in pure Rust"
documentation = "https://docs.rs/libm"
keywords = ["libm", "math"]
license = "MIT"
name = "libm"
readme = "README.md"
repository = "https://github.com/rust-lang/libm"
version = "0.2.11"
edition = "2021"
# NOTE(review): MSRV — confirm this stays in sync with CI.
rust-version = "1.63"

[features]
default = ["arch"]

# Enable architecture-specific features such as SIMD or assembly routines.
arch = []

# This tells the compiler to assume that a Nightly toolchain is being used and
# that it should activate any useful Nightly things accordingly.
unstable = ["unstable-intrinsics", "unstable-float"]

# Enable calls to functions in `core::intrinsics`
unstable-intrinsics = []

# Make some internal things public for testing.
unstable-public-internals = []

# Enable the nightly-only `f16` and `f128`.
unstable-float = []

# Used to prevent using any intrinsics or arch-specific code.
#
# HACK: this is a negative feature which is generally a bad idea in Cargo, but
# we need it to be able to forbid other features when this crate is used in
# Rust dependencies. Setting this overrides all features that may enable
# hard float operations.
force-soft-floats = []

[dev-dependencies]
no-panic = "0.1.35"

[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = [
    # compiler-builtins sets this feature, but we use it in `libm`
    'cfg(feature, values("compiler-builtins"))',
] }

View file

@ -0,0 +1 @@
../LICENSE.txt

View file

@ -0,0 +1 @@
../README.md

View file

@ -0,0 +1,18 @@
use std::env;
mod configure;
/// Build script for `libm`: emits the crate's cfg flags and the optional
/// `assert_no_panic` cfg.
fn main() {
    let config = configure::Config::from_env();

    println!("cargo:rerun-if-changed=build.rs");
    println!("cargo:rerun-if-changed=configure.rs");
    println!("cargo:rustc-check-cfg=cfg(assert_no_panic)");

    // If set, enable `no-panic`. Requires LTO (`release-opt` profile).
    if env::var("ENSURE_NO_PANIC").is_ok() {
        println!("cargo:rustc-cfg=assert_no_panic");
    }

    configure::emit_libm_config(&config);
}

View file

@ -0,0 +1,183 @@
// Configuration shared with both libm and libm-test
use std::env;
use std::path::PathBuf;
/// Build-time configuration gathered from the environment Cargo provides to
/// build scripts.
#[allow(dead_code)]
pub struct Config {
    pub manifest_dir: PathBuf,
    pub out_dir: PathBuf,
    pub opt_level: String,
    pub cargo_features: Vec<String>,
    pub target_arch: String,
    pub target_env: String,
    pub target_family: Option<String>,
    pub target_os: String,
    pub target_string: String,
    pub target_vendor: String,
    pub target_features: Vec<String>,
}

impl Config {
    /// Read all relevant `CARGO_*`/build-script variables from the environment.
    ///
    /// Panics if a variable Cargo always sets for build scripts is missing.
    pub fn from_env() -> Self {
        // Variables Cargo guarantees for build scripts; missing means we are
        // not being run by Cargo, so panicking is fine.
        let required = |key: &str| env::var(key).unwrap();

        let target_features = match env::var("CARGO_CFG_TARGET_FEATURE") {
            Ok(feats) => feats.split(',').map(|f| f.to_owned()).collect(),
            Err(_) => Vec::new(),
        };

        // Enabled Cargo features show up as `CARGO_FEATURE_<NAME>`; normalize
        // them back to their kebab-case names.
        let cargo_features = env::vars()
            .filter_map(|(name, _value)| name.strip_prefix("CARGO_FEATURE_").map(ToOwned::to_owned))
            .map(|s| s.to_lowercase().replace("_", "-"))
            .collect();

        Self {
            manifest_dir: PathBuf::from(required("CARGO_MANIFEST_DIR")),
            out_dir: PathBuf::from(required("OUT_DIR")),
            opt_level: required("OPT_LEVEL"),
            cargo_features,
            target_arch: required("CARGO_CFG_TARGET_ARCH"),
            target_env: required("CARGO_CFG_TARGET_ENV"),
            target_family: env::var("CARGO_CFG_TARGET_FAMILY").ok(),
            target_os: required("CARGO_CFG_TARGET_OS"),
            target_string: required("TARGET"),
            target_vendor: required("CARGO_CFG_TARGET_VENDOR"),
            target_features,
        }
    }
}
/// Libm gets most config options made available.
#[allow(dead_code)]
pub fn emit_libm_config(cfg: &Config) {
    // Feature-dependent cfgs first, then the Config-derived ones.
    emit_intrinsics_cfg();
    emit_arch_cfg();
    emit_optimization_cfg(cfg);
    emit_cfg_shorthands(cfg);
    emit_cfg_env(cfg);
    emit_f16_f128_cfg(cfg);
}
/// Tests don't need most feature-related config.
///
/// Subset of `emit_libm_config` without the intrinsics/arch cfgs.
#[allow(dead_code)]
pub fn emit_test_config(cfg: &Config) {
    emit_optimization_cfg(cfg);
    emit_cfg_shorthands(cfg);
    emit_cfg_env(cfg);
    emit_f16_f128_cfg(cfg);
}
/// Simplify the feature logic for enabling intrinsics so code only needs to use
/// `cfg(intrinsics_enabled)`.
fn emit_intrinsics_cfg() {
    println!("cargo:rustc-check-cfg=cfg(intrinsics_enabled)");

    // Disabled by default; `unstable-intrinsics` opts back in, and
    // `force-soft-floats` always wins and disables again.
    let enabled = cfg!(feature = "unstable-intrinsics") && !cfg!(feature = "force-soft-floats");
    if enabled {
        println!("cargo:rustc-cfg=intrinsics_enabled");
    }
}
/// Simplify the feature logic for enabling arch-specific features so code only needs to use
/// `cfg(arch_enabled)`.
fn emit_arch_cfg() {
    println!("cargo:rustc-check-cfg=cfg(arch_enabled)");

    // On by default through the "arch" feature; `force-soft-floats` overrides
    // and disables it.
    let enabled = cfg!(feature = "arch") && !cfg!(feature = "force-soft-floats");
    if enabled {
        println!("cargo:rustc-cfg=arch_enabled");
    }
}
/// Some tests are extremely slow. Emit a config option based on optimization level.
fn emit_optimization_cfg(cfg: &Config) {
    println!("cargo:rustc-check-cfg=cfg(optimizations_enabled)");

    // Opt levels 0 and 1 count as unoptimized; everything else (2, 3, s, z)
    // enables the cfg.
    match cfg.opt_level.as_str() {
        "0" | "1" => {}
        _ => println!("cargo:rustc-cfg=optimizations_enabled"),
    }
}
/// Provide an alias for common longer config combinations.
fn emit_cfg_shorthands(cfg: &Config) {
    println!("cargo:rustc-check-cfg=cfg(x86_no_sse)");

    // Shorthand to detect i586 targets: 32-bit x86 without SSE.
    let has_sse = cfg.target_features.iter().any(|f| f == "sse");
    if cfg.target_arch == "x86" && !has_sse {
        println!("cargo:rustc-cfg=x86_no_sse");
    }
}
/// Reemit config that we make use of for test logging.
fn emit_cfg_env(cfg: &Config) {
    // Exposed to the compiled crate via `cargo:rustc-env` so tests can log the
    // configuration they were built with.
    println!("cargo:rustc-env=CFG_CARGO_FEATURES={:?}", cfg.cargo_features);
    println!("cargo:rustc-env=CFG_OPT_LEVEL={}", cfg.opt_level);
    println!("cargo:rustc-env=CFG_TARGET_FEATURES={:?}", cfg.target_features);
}
/// Configure whether or not `f16` and `f128` support should be enabled.
fn emit_f16_f128_cfg(cfg: &Config) {
    // Emitted unconditionally so `cfg(f16_enabled)`/`cfg(f128_enabled)` are
    // always known cfgs. (These directives were previously also emitted a
    // second time near the bottom of this function; the duplicates have been
    // removed.)
    println!("cargo:rustc-check-cfg=cfg(f16_enabled)");
    println!("cargo:rustc-check-cfg=cfg(f128_enabled)");

    // `unstable-float` enables these features.
    if !cfg!(feature = "unstable-float") {
        return;
    }

    // Set whether or not `f16` and `f128` are supported at a basic level by LLVM. This only means
    // that the backend will not crash when using these types and generates code that can be called
    // without crashing (no infinite recursion). This does not mean that the platform doesn't have
    // ABI or other bugs.
    //
    // We do this here rather than in `rust-lang/rust` because configuring via cargo features is
    // not straightforward.
    //
    // Original source of this list:
    // <https://github.com/rust-lang/compiler-builtins/pull/652#issuecomment-2266151350>
    let f16_enabled = match cfg.target_arch.as_str() {
        // Unsupported <https://github.com/llvm/llvm-project/issues/94434>
        "arm64ec" => false,
        // Selection failure <https://github.com/llvm/llvm-project/issues/50374>
        "s390x" => false,
        // Infinite recursion <https://github.com/llvm/llvm-project/issues/97981>
        // FIXME(llvm): loongarch fixed by <https://github.com/llvm/llvm-project/pull/107791>
        "csky" => false,
        "hexagon" => false,
        "loongarch64" => false,
        "mips" | "mips64" | "mips32r6" | "mips64r6" => false,
        "powerpc" | "powerpc64" => false,
        "sparc" | "sparc64" => false,
        "wasm32" | "wasm64" => false,
        // Most everything else works as of LLVM 19
        _ => true,
    };

    let f128_enabled = match cfg.target_arch.as_str() {
        // Unsupported (libcall is not supported) <https://github.com/llvm/llvm-project/issues/121122>
        "amdgpu" => false,
        // Unsupported <https://github.com/llvm/llvm-project/issues/94434>
        "arm64ec" => false,
        // Selection failure <https://github.com/llvm/llvm-project/issues/96432>
        "mips64" | "mips64r6" => false,
        // Selection failure <https://github.com/llvm/llvm-project/issues/95471>
        "nvptx64" => false,
        // Selection failure <https://github.com/llvm/llvm-project/issues/101545>
        "powerpc64" if &cfg.target_os == "aix" => false,
        // Selection failure <https://github.com/llvm/llvm-project/issues/41838>
        "sparc" => false,
        // Most everything else works as of LLVM 19
        _ => true,
    };

    // If the feature is set, disable these types.
    // NOTE(review): this reads `CARGO_FEATURE_NO_F16_F128` directly rather
    // than via `cfg!` — confirm which crate declares that feature.
    let disable_both = env::var_os("CARGO_FEATURE_NO_F16_F128").is_some();

    if f16_enabled && !disable_both {
        println!("cargo:rustc-cfg=f16_enabled");
    }

    if f128_enabled && !disable_both {
        println!("cargo:rustc-cfg=f128_enabled");
    }
}

View file

@ -0,0 +1,29 @@
//! libm in pure Rust
#![no_std]
// Nightly-only features, gated on cfgs emitted by the build script.
#![cfg_attr(intrinsics_enabled, allow(internal_features))]
#![cfg_attr(intrinsics_enabled, feature(core_intrinsics))]
#![cfg_attr(all(intrinsics_enabled, target_family = "wasm"), feature(wasm_numeric_instr))]
#![cfg_attr(f128_enabled, feature(f128))]
#![cfg_attr(f16_enabled, feature(f16))]
// Lints silenced crate-wide: much of this code is ported from C and
// deliberately keeps the original, non-idiomatic structure.
#![allow(clippy::assign_op_pattern)]
#![allow(clippy::deprecated_cfg_attr)]
#![allow(clippy::eq_op)]
#![allow(clippy::excessive_precision)]
#![allow(clippy::float_cmp)]
#![allow(clippy::int_plus_one)]
#![allow(clippy::many_single_char_names)]
#![allow(clippy::mixed_case_hex_literals)]
#![allow(clippy::needless_late_init)]
#![allow(clippy::needless_return)]
#![allow(clippy::unreadable_literal)]
#![allow(clippy::zero_divided_by_zero)]
#![forbid(unsafe_op_in_unsafe_fn)]

mod libm_helper;
mod math;

use core::{f32, f64};

pub use libm_helper::*;
// Re-export every math routine at the crate root.
pub use self::math::*;

View file

@ -0,0 +1,244 @@
use core::marker::PhantomData;
use crate::*;
/// Generic helper for libm functions, abstracting over `f32` and `f64`.
///
/// # Type Parameter
/// - `T`: Either `f32` or `f64`
///
/// # Examples
/// ```rust
/// use libm::{self, Libm};
///
/// const PI_F32: f32 = 3.1415927410e+00;
/// const PI_F64: f64 = 3.1415926535897931160e+00;
///
/// assert!(Libm::<f32>::cos(0.0f32) == libm::cosf(0.0));
/// assert!(Libm::<f32>::sin(PI_F32) == libm::sinf(PI_F32));
///
/// assert!(Libm::<f64>::cos(0.0f64) == libm::cos(0.0));
/// assert!(Libm::<f64>::sin(PI_F64) == libm::sin(PI_F64));
/// ```
pub struct Libm<T>(PhantomData<T>);
/// Generates inherent methods on `Libm<$t>` that forward to the free
/// functions, e.g. `Libm::<f32>::sin` forwards to `libm::sinf`.
macro_rules! libm_helper {
    // Entry point: open the `impl` block and expand the function list inside it.
    ($t:ident, funcs: $funcs:tt) => {
        impl Libm<$t> {
            #![allow(unused_parens)]
            libm_helper! { $funcs }
        }
    };

    // Expand each `(fn ...);` entry from the brace-delimited list.
    ({$($func:tt;)*}) => {
        $(
            libm_helper! { $func }
        )*
    };

    // Emit one forwarding method for a single entry.
    ((fn $func:ident($($arg:ident: $arg_typ:ty),*) -> ($($ret_typ:ty),*); => $libm_fn:ident)) => {
        #[inline(always)]
        pub fn $func($($arg: $arg_typ),*) -> ($($ret_typ),*) {
            $libm_fn($($arg),*)
        }
    };
}
// verify-apilist-start
// `Libm<f32>` wrappers: each generic name maps to the `f`-suffixed
// single-precision free function (`acos` -> `acosf`, ...). The list is kept
// alphabetically sorted; the verify markers are checked by CI tooling.
libm_helper! {
    f32,
    funcs: {
        // verify-sorted-start
        (fn acos(x: f32) -> (f32); => acosf);
        (fn acosh(x: f32) -> (f32); => acoshf);
        (fn asin(x: f32) -> (f32); => asinf);
        (fn asinh(x: f32) -> (f32); => asinhf);
        (fn atan(x: f32) -> (f32); => atanf);
        (fn atan2(y: f32, x: f32) -> (f32); => atan2f);
        (fn atanh(x: f32) -> (f32); => atanhf);
        (fn cbrt(x: f32) -> (f32); => cbrtf);
        (fn ceil(x: f32) -> (f32); => ceilf);
        (fn copysign(x: f32, y: f32) -> (f32); => copysignf);
        (fn cos(x: f32) -> (f32); => cosf);
        (fn cosh(x: f32) -> (f32); => coshf);
        (fn erf(x: f32) -> (f32); => erff);
        (fn erfc(x: f32) -> (f32); => erfcf);
        (fn exp(x: f32) -> (f32); => expf);
        (fn exp10(x: f32) -> (f32); => exp10f);
        (fn exp2(x: f32) -> (f32); => exp2f);
        (fn expm1(x: f32) -> (f32); => expm1f);
        (fn fabs(x: f32) -> (f32); => fabsf);
        (fn fdim(x: f32, y: f32) -> (f32); => fdimf);
        (fn floor(x: f32) -> (f32); => floorf);
        (fn fma(x: f32, y: f32, z: f32) -> (f32); => fmaf);
        (fn fmax(x: f32, y: f32) -> (f32); => fmaxf);
        (fn fmin(x: f32, y: f32) -> (f32); => fminf);
        (fn fmod(x: f32, y: f32) -> (f32); => fmodf);
        (fn frexp(x: f32) -> (f32, i32); => frexpf);
        (fn hypot(x: f32, y: f32) -> (f32); => hypotf);
        (fn ilogb(x: f32) -> (i32); => ilogbf);
        (fn j0(x: f32) -> (f32); => j0f);
        (fn j1(x: f32) -> (f32); => j1f);
        (fn jn(n: i32, x: f32) -> (f32); => jnf);
        (fn ldexp(x: f32, n: i32) -> (f32); => ldexpf);
        (fn lgamma(x: f32) -> (f32); => lgammaf);
        (fn lgamma_r(x: f32) -> (f32, i32); => lgammaf_r);
        (fn log(x: f32) -> (f32); => logf);
        (fn log10(x: f32) -> (f32); => log10f);
        (fn log1p(x: f32) -> (f32); => log1pf);
        (fn log2(x: f32) -> (f32); => log2f);
        (fn modf(x: f32) -> (f32, f32); => modff);
        (fn nextafter(x: f32, y: f32) -> (f32); => nextafterf);
        (fn pow(x: f32, y: f32) -> (f32); => powf);
        (fn remainder(x: f32, y: f32) -> (f32); => remainderf);
        (fn remquo(x: f32, y: f32) -> (f32, i32); => remquof);
        (fn rint(x: f32) -> (f32); => rintf);
        (fn round(x: f32) -> (f32); => roundf);
        (fn roundeven(x: f32) -> (f32); => roundevenf);
        (fn scalbn(x: f32, n: i32) -> (f32); => scalbnf);
        (fn sin(x: f32) -> (f32); => sinf);
        (fn sincos(x: f32) -> (f32, f32); => sincosf);
        (fn sinh(x: f32) -> (f32); => sinhf);
        (fn sqrt(x: f32) -> (f32); => sqrtf);
        (fn tan(x: f32) -> (f32); => tanf);
        (fn tanh(x: f32) -> (f32); => tanhf);
        (fn tgamma(x: f32) -> (f32); => tgammaf);
        (fn trunc(x: f32) -> (f32); => truncf);
        (fn y0(x: f32) -> (f32); => y0f);
        (fn y1(x: f32) -> (f32); => y1f);
        (fn yn(n: i32, x: f32) -> (f32); => ynf);
        // verify-sorted-end
    }
}
// `Libm<f64>` wrappers: generic names map directly to the unsuffixed
// double-precision free functions. The list is kept alphabetically sorted;
// the verify markers are checked by CI tooling.
//
// NOTE(review): the `f32` variants of `fmaximum*`/`fminimum*` are exposed via
// `Libm<f64>` rather than `Libm<f32>`; confirm whether this placement is
// intentional before moving them.
libm_helper! {
    f64,
    funcs: {
        // verify-sorted-start
        (fn acos(x: f64) -> (f64); => acos);
        (fn acosh(x: f64) -> (f64); => acosh);
        (fn asin(x: f64) -> (f64); => asin);
        (fn asinh(x: f64) -> (f64); => asinh);
        (fn atan(x: f64) -> (f64); => atan);
        (fn atan2(y: f64, x: f64) -> (f64); => atan2);
        (fn atanh(x: f64) -> (f64); => atanh);
        (fn cbrt(x: f64) -> (f64); => cbrt);
        (fn ceil(x: f64) -> (f64); => ceil);
        (fn copysign(x: f64, y: f64) -> (f64); => copysign);
        (fn cos(x: f64) -> (f64); => cos);
        (fn cosh(x: f64) -> (f64); => cosh);
        (fn erf(x: f64) -> (f64); => erf);
        (fn erfc(x: f64) -> (f64); => erfc);
        (fn exp(x: f64) -> (f64); => exp);
        (fn exp10(x: f64) -> (f64); => exp10);
        (fn exp2(x: f64) -> (f64); => exp2);
        (fn expm1(x: f64) -> (f64); => expm1);
        (fn fabs(x: f64) -> (f64); => fabs);
        (fn fdim(x: f64, y: f64) -> (f64); => fdim);
        (fn floor(x: f64) -> (f64); => floor);
        (fn fma(x: f64, y: f64, z: f64) -> (f64); => fma);
        (fn fmax(x: f64, y: f64) -> (f64); => fmax);
        (fn fmaximum(x: f64, y: f64) -> (f64); => fmaximum);
        (fn fmaximum_num(x: f64, y: f64) -> (f64); => fmaximum_num);
        (fn fmaximum_numf(x: f32, y: f32) -> (f32); => fmaximum_numf);
        (fn fmaximumf(x: f32, y: f32) -> (f32); => fmaximumf);
        (fn fmin(x: f64, y: f64) -> (f64); => fmin);
        (fn fminimum(x: f64, y: f64) -> (f64); => fminimum);
        (fn fminimum_num(x: f64, y: f64) -> (f64); => fminimum_num);
        (fn fminimum_numf(x: f32, y: f32) -> (f32); => fminimum_numf);
        (fn fminimumf(x: f32, y: f32) -> (f32); => fminimumf);
        (fn fmod(x: f64, y: f64) -> (f64); => fmod);
        (fn frexp(x: f64) -> (f64, i32); => frexp);
        (fn hypot(x: f64, y: f64) -> (f64); => hypot);
        (fn ilogb(x: f64) -> (i32); => ilogb);
        (fn j0(x: f64) -> (f64); => j0);
        (fn j1(x: f64) -> (f64); => j1);
        (fn jn(n: i32, x: f64) -> (f64); => jn);
        (fn ldexp(x: f64, n: i32) -> (f64); => ldexp);
        (fn lgamma(x: f64) -> (f64); => lgamma);
        (fn lgamma_r(x: f64) -> (f64, i32); => lgamma_r);
        (fn log(x: f64) -> (f64); => log);
        (fn log10(x: f64) -> (f64); => log10);
        (fn log1p(x: f64) -> (f64); => log1p);
        (fn log2(x: f64) -> (f64); => log2);
        (fn modf(x: f64) -> (f64, f64); => modf);
        (fn nextafter(x: f64, y: f64) -> (f64); => nextafter);
        (fn pow(x: f64, y: f64) -> (f64); => pow);
        (fn remainder(x: f64, y: f64) -> (f64); => remainder);
        (fn remquo(x: f64, y: f64) -> (f64, i32); => remquo);
        (fn rint(x: f64) -> (f64); => rint);
        (fn round(x: f64) -> (f64); => round);
        // Fixed typo: was `roundevem`, which generated a misspelled method name
        // (every other width exposes this as `roundeven`).
        (fn roundeven(x: f64) -> (f64); => roundeven);
        (fn scalbn(x: f64, n: i32) -> (f64); => scalbn);
        (fn sin(x: f64) -> (f64); => sin);
        (fn sincos(x: f64) -> (f64, f64); => sincos);
        (fn sinh(x: f64) -> (f64); => sinh);
        (fn sqrt(x: f64) -> (f64); => sqrt);
        (fn tan(x: f64) -> (f64); => tan);
        (fn tanh(x: f64) -> (f64); => tanh);
        (fn tgamma(x: f64) -> (f64); => tgamma);
        (fn trunc(x: f64) -> (f64); => trunc);
        (fn y0(x: f64) -> (f64); => y0);
        (fn y1(x: f64) -> (f64); => y1);
        (fn yn(n: i32, x: f64) -> (f64); => yn);
        // verify-sorted-end
    }
}
// `Libm<f16>` wrappers, available only when the `f16` type is enabled.
//
// Consistency fixes relative to the other widths: the generated method names
// are the generic (unsuffixed) names — `fmaximum` (was `fmaximumf16`), `sqrt`
// (was `sqrtf`), and `trunc` (was `truncf`) — matching the naming used by the
// f32/f64/f128 lists above and below. `fmaximum` is also reordered before
// `fmaximum_num` to keep the verify-sorted invariant.
#[cfg(f16_enabled)]
libm_helper! {
    f16,
    funcs: {
        // verify-sorted-start
        (fn ceil(x: f16) -> (f16); => ceilf16);
        (fn copysign(x: f16, y: f16) -> (f16); => copysignf16);
        (fn fabs(x: f16) -> (f16); => fabsf16);
        (fn fdim(x: f16, y: f16) -> (f16); => fdimf16);
        (fn floor(x: f16) -> (f16); => floorf16);
        (fn fmax(x: f16, y: f16) -> (f16); => fmaxf16);
        (fn fmaximum(x: f16, y: f16) -> (f16); => fmaximumf16);
        (fn fmaximum_num(x: f16, y: f16) -> (f16); => fmaximum_numf16);
        (fn fmin(x: f16, y: f16) -> (f16); => fminf16);
        (fn fminimum(x: f16, y: f16) -> (f16); => fminimumf16);
        (fn fminimum_num(x: f16, y: f16) -> (f16); => fminimum_numf16);
        (fn fmod(x: f16, y: f16) -> (f16); => fmodf16);
        (fn ldexp(x: f16, n: i32) -> (f16); => ldexpf16);
        (fn rint(x: f16) -> (f16); => rintf16);
        (fn round(x: f16) -> (f16); => roundf16);
        (fn roundeven(x: f16) -> (f16); => roundevenf16);
        (fn scalbn(x: f16, n: i32) -> (f16); => scalbnf16);
        (fn sqrt(x: f16) -> (f16); => sqrtf16);
        (fn trunc(x: f16) -> (f16); => truncf16);
        // verify-sorted-end
    }
}
// `Libm<f128>` wrappers, available only when the `f128` type is enabled.
// Generic names map to the `f128`-suffixed free functions; the list is kept
// alphabetically sorted (checked via the verify markers).
#[cfg(f128_enabled)]
libm_helper! {
    f128,
    funcs: {
        // verify-sorted-start
        (fn ceil(x: f128) -> (f128); => ceilf128);
        (fn copysign(x: f128, y: f128) -> (f128); => copysignf128);
        (fn fabs(x: f128) -> (f128); => fabsf128);
        (fn fdim(x: f128, y: f128) -> (f128); => fdimf128);
        (fn floor(x: f128) -> (f128); => floorf128);
        (fn fma(x: f128, y: f128, z: f128) -> (f128); => fmaf128);
        (fn fmax(x: f128, y: f128) -> (f128); => fmaxf128);
        (fn fmaximum(x: f128, y: f128) -> (f128); => fmaximumf128);
        (fn fmaximum_num(x: f128, y: f128) -> (f128); => fmaximum_numf128);
        (fn fmin(x: f128, y: f128) -> (f128); => fminf128);
        (fn fminimum(x: f128, y: f128) -> (f128); => fminimumf128);
        (fn fminimum_num(x: f128, y: f128) -> (f128); => fminimum_numf128);
        (fn fmod(x: f128, y: f128) -> (f128); => fmodf128);
        (fn ldexp(x: f128, n: i32) -> (f128); => ldexpf128);
        (fn rint(x: f128) -> (f128); => rintf128);
        (fn round(x: f128) -> (f128); => roundf128);
        (fn roundeven(x: f128) -> (f128); => roundevenf128);
        (fn scalbn(x: f128, n: i32) -> (f128); => scalbnf128);
        (fn sqrt(x: f128) -> (f128); => sqrtf128);
        (fn trunc(x: f128) -> (f128); => truncf128);
        // verify-sorted-end
    }
}
// verify-apilist-end

View file

@ -0,0 +1,112 @@
/* origin: FreeBSD /usr/src/lib/msun/src/e_acos.c */
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunSoft, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
/* acos(x)
* Method :
* acos(x) = pi/2 - asin(x)
* acos(-x) = pi/2 + asin(x)
* For |x|<=0.5
* acos(x) = pi/2 - (x + x*x^2*R(x^2)) (see asin.c)
* For x>0.5
* acos(x) = pi/2 - (pi/2 - 2asin(sqrt((1-x)/2)))
* = 2asin(sqrt((1-x)/2))
* = 2s + 2s*z*R(z) ...z=(1-x)/2, s=sqrt(z)
* = 2f + (2c + 2s*z*R(z))
* where f=hi part of s, and c = (z-f*f)/(s+f) is the correction term
* for f so that f+c ~ sqrt(z).
* For x<-0.5
* acos(x) = pi - 2asin(sqrt((1-|x|)/2))
* = pi - 0.5*(s+s*z*R(z)), where z=(1-|x|)/2,s=sqrt(z)
*
* Special cases:
* if x is NaN, return x itself;
* if |x|>1, return NaN with invalid signal.
*
* Function needed: sqrt
*/
use super::sqrt;
const PIO2_HI: f64 = 1.57079632679489655800e+00; /* 0x3FF921FB, 0x54442D18 */
const PIO2_LO: f64 = 6.12323399573676603587e-17; /* 0x3C91A626, 0x33145C07 */
const PS0: f64 = 1.66666666666666657415e-01; /* 0x3FC55555, 0x55555555 */
const PS1: f64 = -3.25565818622400915405e-01; /* 0xBFD4D612, 0x03EB6F7D */
const PS2: f64 = 2.01212532134862925881e-01; /* 0x3FC9C155, 0x0E884455 */
const PS3: f64 = -4.00555345006794114027e-02; /* 0xBFA48228, 0xB5688F3B */
const PS4: f64 = 7.91534994289814532176e-04; /* 0x3F49EFE0, 0x7501B288 */
const PS5: f64 = 3.47933107596021167570e-05; /* 0x3F023DE1, 0x0DFDF709 */
const QS1: f64 = -2.40339491173441421878e+00; /* 0xC0033A27, 0x1C8A2D4B */
const QS2: f64 = 2.02094576023350569471e+00; /* 0x40002AE5, 0x9C598AC8 */
const QS3: f64 = -6.88283971605453293030e-01; /* 0xBFE6066C, 0x1B8D0159 */
const QS4: f64 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */

// Rational approximation R(z) used by `acos` (see the derivation comment
// above): numerator z*P(z) over denominator Q(z), evaluated by Horner's rule.
fn r(z: f64) -> f64 {
    // Horner evaluation of the numerator polynomial z * P(z), staged so each
    // step performs the same multiply/add as the original nested expression.
    let mut num = PS4 + z * PS5;
    num = PS3 + z * num;
    num = PS2 + z * num;
    num = PS1 + z * num;
    num = PS0 + z * num;
    num *= z;
    let den = 1.0 + z * (QS1 + z * (QS2 + z * (QS3 + z * QS4)));
    num / den
}
/// Arccosine (f64)
///
/// Computes the inverse cosine (arc cosine) of the input value.
/// Arguments must be in the range -1 to 1.
/// Returns values in radians, in the range of 0 to pi.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn acos(x: f64) -> f64 {
    // 2^-120, added to nudge results the same way the original C code does.
    let tiny = f64::from_bits(0x3870000000000000); // 0x1p-120
    // High word of x: sign, exponent, and top mantissa bits.
    let hi = (x.to_bits() >> 32) as u32;
    let abs_hi = hi & 0x7fffffff;

    /* |x| >= 1 or nan */
    if abs_hi >= 0x3ff00000 {
        let lo: u32 = x.to_bits() as u32;
        if ((abs_hi - 0x3ff00000) | lo) == 0 {
            /* acos(1)=0, acos(-1)=pi */
            if (hi >> 31) != 0 {
                return 2. * PIO2_HI + tiny;
            }
            return 0.;
        }
        // |x| > 1 or NaN: 0/0 raises invalid and yields NaN.
        return 0. / (x - x);
    }

    /* |x| < 0.5 */
    if abs_hi < 0x3fe00000 {
        if abs_hi <= 0x3c600000 {
            /* |x| < 2**-57: acos(x) ~= pi/2 */
            return PIO2_HI + tiny;
        }
        return PIO2_HI - (x - (PIO2_LO - x * r(x * x)));
    }

    /* x < -0.5 */
    if (hi >> 31) != 0 {
        let z = (1.0 + x) * 0.5;
        let s = sqrt(z);
        let w = r(z) * s - PIO2_LO;
        return 2. * (PIO2_HI - (s + w));
    }

    /* x > 0.5 */
    let z = (1.0 - x) * 0.5;
    let s = sqrt(z);
    // High part of s: low 32 bits of the mantissa cleared, so that
    // df + c approximates sqrt(z) with extra precision.
    let df = f64::from_bits(s.to_bits() & 0xff_ff_ff_ff_00_00_00_00);
    let c = (z - df * df) / (s + df);
    let w = r(z) * s + c;
    2. * (df + w)
}

View file

@ -0,0 +1,79 @@
/* origin: FreeBSD /usr/src/lib/msun/src/e_acosf.c */
/*
* Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
use super::sqrt::sqrtf;
const PIO2_HI: f32 = 1.5707962513e+00; /* 0x3fc90fda */
const PIO2_LO: f32 = 7.5497894159e-08; /* 0x33a22168 */
const P_S0: f32 = 1.6666586697e-01;
const P_S1: f32 = -4.2743422091e-02;
const P_S2: f32 = -8.6563630030e-03;
const Q_S1: f32 = -7.0662963390e-01;

// Rational approximation R(z) used by `acosf`: numerator z*P(z) over
// denominator Q(z), evaluated by Horner's rule.
fn r(z: f32) -> f32 {
    // Staged Horner evaluation; each step matches one multiply/add of the
    // original nested expression.
    let mut num = P_S1 + z * P_S2;
    num = P_S0 + z * num;
    num *= z;
    let den = 1. + z * Q_S1;
    num / den
}
/// Arccosine (f32)
///
/// Computes the inverse cosine (arc cosine) of the input value.
/// Arguments must be in the range -1 to 1.
/// Returns values in radians, in the range of 0 to pi.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn acosf(x: f32) -> f32 {
    // 2^-120, added to nudge results the same way the original C code does.
    let tiny = f32::from_bits(0x03800000); // 0x1p-120
    let bits = x.to_bits();
    let abs_bits = bits & 0x7fffffff;

    /* |x| >= 1 or nan */
    if abs_bits >= 0x3f800000 {
        if abs_bits == 0x3f800000 {
            /* acos(1)=0, acos(-1)=pi */
            if (bits >> 31) != 0 {
                return 2. * PIO2_HI + tiny;
            }
            return 0.;
        }
        // |x| > 1 or NaN: 0/0 raises invalid and yields NaN.
        return 0. / (x - x);
    }

    /* |x| < 0.5 */
    if abs_bits < 0x3f000000 {
        if abs_bits <= 0x32800000 {
            /* |x| < 2**-26: acos(x) ~= pi/2 */
            return PIO2_HI + tiny;
        }
        return PIO2_HI - (x - (PIO2_LO - x * r(x * x)));
    }

    /* x < -0.5 */
    if (bits >> 31) != 0 {
        let z = (1. + x) * 0.5;
        let s = sqrtf(z);
        let w = r(z) * s - PIO2_LO;
        return 2. * (PIO2_HI - (s + w));
    }

    /* x > 0.5 */
    let z = (1. - x) * 0.5;
    let s = sqrtf(z);
    // High part of s with the low mantissa bits cleared; df + c approximates
    // sqrt(z) with extra precision.
    let df = f32::from_bits(s.to_bits() & 0xfffff000);
    let c = (z - df * df) / (s + df);
    let w = r(z) * s + c;
    2. * (df + w)
}

View file

@ -0,0 +1,27 @@
use super::{log, log1p, sqrt};
const LN2: f64 = 0.693147180559945309417232121458176568; /* 0x3fe62e42, 0xfefa39ef*/

/// Inverse hyperbolic cosine (f64)
///
/// Calculates the inverse hyperbolic cosine of `x`.
/// Is defined as `log(x + sqrt(x*x-1))`.
/// `x` must be a number greater than or equal to 1.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn acosh(x: f64) -> f64 {
    // Biased exponent of x, used to classify the argument by magnitude.
    let exp = ((x.to_bits() >> 52) as usize) & 0x7ff;

    /* x < 1 domain error is handled in the called functions */
    if exp < 0x3ff + 1 {
        /* |x| < 2, up to 2ulp error in [1,1.125] */
        let t = x - 1.0;
        return log1p(t + sqrt(t * t + 2.0 * t));
    }
    if exp < 0x3ff + 26 {
        /* |x| < 0x1p26 */
        return log(2.0 * x - 1.0 / (x + sqrt(x * x - 1.0)));
    }
    /* |x| >= 0x1p26 or nan: sqrt(x*x-1) ~= x, so use log(x) + log(2) */
    log(x) + LN2
}

View file

@ -0,0 +1,26 @@
use super::{log1pf, logf, sqrtf};
const LN2: f32 = 0.693147180559945309417232121458176568;

/// Inverse hyperbolic cosine (f32)
///
/// Calculates the inverse hyperbolic cosine of `x`.
/// Is defined as `log(x + sqrt(x*x-1))`.
/// `x` must be a number greater than or equal to 1.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn acoshf(x: f32) -> f32 {
    // |x| bits, used to classify the argument by magnitude.
    let abs_bits = x.to_bits() & 0x7fffffff;

    if abs_bits < 0x3f800000 + (1 << 23) {
        /* |x| < 2, invalid if x < 1 or nan */
        /* up to 2ulp error in [1,1.125] */
        let t = x - 1.0;
        return log1pf(t + sqrtf(t * t + 2.0 * t));
    }
    if abs_bits < 0x3f800000 + (12 << 23) {
        /* |x| < 0x1p12 */
        return logf(2.0 * x - 1.0 / (x + sqrtf(x * x - 1.0)));
    }
    /* x >= 0x1p12: sqrt(x*x-1) ~= x, so use log(x) + log(2) */
    logf(x) + LN2
}

View file

@ -0,0 +1,115 @@
//! Architecture-specific support for aarch64 with neon.
use core::arch::asm;
/// `f64` fused multiply-add: computes `x * y + z` via the aarch64 `fmadd` instruction.
pub fn fma(mut x: f64, y: f64, z: f64) -> f64 {
    // SAFETY: `fmadd` is available with neon and has no side effects.
    unsafe {
        asm!(
            "fmadd {x:d}, {x:d}, {y:d}, {z:d}",
            x = inout(vreg) x,
            y = in(vreg) y,
            z = in(vreg) z,
            options(nomem, nostack, pure)
        );
    }
    x
}

/// `f32` fused multiply-add: computes `x * y + z` via the aarch64 `fmadd` instruction.
pub fn fmaf(mut x: f32, y: f32, z: f32) -> f32 {
    // SAFETY: `fmadd` is available with neon and has no side effects.
    unsafe {
        asm!(
            "fmadd {x:s}, {x:s}, {y:s}, {z:s}",
            x = inout(vreg) x,
            y = in(vreg) y,
            z = in(vreg) z,
            options(nomem, nostack, pure)
        );
    }
    x
}
/// Round `x` to the nearest integer via the aarch64 `frintn` instruction (f64).
pub fn rint(mut x: f64) -> f64 {
    // SAFETY: `frintn` is available with neon and has no side effects.
    //
    // `frintn` is always round-to-nearest which does not match the C specification, but Rust does
    // not support rounding modes.
    unsafe {
        asm!(
            "frintn {x:d}, {x:d}",
            x = inout(vreg) x,
            options(nomem, nostack, pure)
        );
    }
    x
}

/// Round `x` to the nearest integer via the aarch64 `frintn` instruction (f32).
pub fn rintf(mut x: f32) -> f32 {
    // SAFETY: `frintn` is available with neon and has no side effects.
    //
    // `frintn` is always round-to-nearest which does not match the C specification, but Rust does
    // not support rounding modes.
    unsafe {
        asm!(
            "frintn {x:s}, {x:s}",
            x = inout(vreg) x,
            options(nomem, nostack, pure)
        );
    }
    x
}

/// Round `x` to the nearest integer via `frintn` (f16); requires the `fp16` target feature.
#[cfg(all(f16_enabled, target_feature = "fp16"))]
pub fn rintf16(mut x: f16) -> f16 {
    // SAFETY: `frintn` is available for `f16` with `fp16` (implies `neon`) and has no side effects.
    //
    // `frintn` is always round-to-nearest which does not match the C specification, but Rust does
    // not support rounding modes.
    unsafe {
        asm!(
            "frintn {x:h}, {x:h}",
            x = inout(vreg) x,
            options(nomem, nostack, pure)
        );
    }
    x
}
/// Square root via the aarch64 `fsqrt` instruction (f64).
pub fn sqrt(mut x: f64) -> f64 {
    // SAFETY: `fsqrt` is available with neon and has no side effects.
    unsafe {
        asm!(
            "fsqrt {x:d}, {x:d}",
            x = inout(vreg) x,
            options(nomem, nostack, pure)
        );
    }
    x
}

/// Square root via the aarch64 `fsqrt` instruction (f32).
pub fn sqrtf(mut x: f32) -> f32 {
    // SAFETY: `fsqrt` is available with neon and has no side effects.
    unsafe {
        asm!(
            "fsqrt {x:s}, {x:s}",
            x = inout(vreg) x,
            options(nomem, nostack, pure)
        );
    }
    x
}

/// Square root via `fsqrt` (f16); requires the `fp16` target feature.
#[cfg(all(f16_enabled, target_feature = "fp16"))]
pub fn sqrtf16(mut x: f16) -> f16 {
    // SAFETY: `fsqrt` is available for `f16` with `fp16` (implies `neon`) and has no
    // side effects.
    unsafe {
        asm!(
            "fsqrt {x:h}, {x:h}",
            x = inout(vreg) x,
            options(nomem, nostack, pure)
        );
    }
    x
}

View file

@ -0,0 +1,37 @@
//! Architecture-specific support for x86-32 without SSE2
use super::super::fabs;
/// Use an alternative implementation on x86, because the
/// main implementation fails with the x87 FPU used by
/// debian i386, probably due to excess precision issues.
/// Basic implementation taken from https://github.com/rust-lang/libm/issues/219.
pub fn ceil(x: f64) -> f64 {
    // |x| >= 2^52: already integral (or NaN/infinity) — return unchanged.
    if fabs(x).to_bits() >= 4503599627370496.0_f64.to_bits() {
        return x;
    }
    // |x| < 2^52 fits in i64, so truncation through a cast is exact.
    let truncated = x as i64 as f64;
    // Truncation rounds toward zero; bump up when it landed below x.
    if truncated < x { truncated + 1.0 } else { truncated }
}
/// Use an alternative implementation on x86, because the
/// main implementation fails with the x87 FPU used by
/// debian i386, probably due to excess precision issues.
/// Basic implementation taken from https://github.com/rust-lang/libm/issues/219.
pub fn floor(x: f64) -> f64 {
    // |x| >= 2^52: already integral (or NaN/infinity) — return unchanged.
    if fabs(x).to_bits() >= 4503599627370496.0_f64.to_bits() {
        return x;
    }
    // |x| < 2^52 fits in i64, so truncation through a cast is exact.
    let truncated = x as i64 as f64;
    // Truncation rounds toward zero; step down when it landed above x.
    if truncated > x { truncated - 1.0 } else { truncated }
}

View file

@ -0,0 +1,27 @@
//! Architecture-specific support for x86-32 and x86-64 with SSE2
/// Single-precision square root via the SSE2 `sqrtss` instruction.
pub fn sqrtf(mut x: f32) -> f32 {
    // SAFETY: `sqrtss` is part of `sse2`, which this module is gated behind. It has no memory
    // access or side effects.
    unsafe {
        core::arch::asm!(
            "sqrtss {x}, {x}",
            x = inout(xmm_reg) x,
            options(nostack, nomem, pure),
        )
    };
    x
}
/// Double-precision square root via the SSE2 `sqrtsd` instruction.
pub fn sqrt(mut x: f64) -> f64 {
    // SAFETY: `sqrtsd` is part of `sse2`, which this module is gated behind. It has no memory
    // access or side effects.
    unsafe {
        core::arch::asm!(
            "sqrtsd {x}, {x}",
            x = inout(xmm_reg) x,
            options(nostack, nomem, pure),
        )
    };
    x
}

Some files were not shown because too many files have changed in this diff Show more